diff --git a/.gitignore b/.gitignore
index b02d5f9..38093e4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,4 +33,8 @@ __pycache__
# No wandb outputs or runs
**/wandb/
-**/runs/
\ No newline at end of file
+**/runs/
+
+mirage/mirage/ros_ws/build/*
+mirage/mirage/ros_ws/install/*
+mirage/mirage/ros_ws/log/*
\ No newline at end of file
diff --git a/README.md b/README.md
index 660750e..2760463 100644
--- a/README.md
+++ b/README.md
@@ -22,31 +22,66 @@ Clone the repo:
git clone --recurse-submodules git@github.com:BerkeleyAutomation/mirage.git
```
-Create a conda environment or virtualenv:
-```
-conda create --name mirage python=3.8
-```
+Install ROS 2 and set up the Gazebo environment for inpainting by following the instructions in `mirage/mirage/ros_ws/src/README.md`.
-Install the mirage Python package.
+Create a conda environment and install the mirage Python package.
```
+conda create -n mirage python=3.10
+conda activate mirage
cd mirage
pip install -e .
```
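+
+To sanity-check the install (assuming the package is importable as `mirage`):
+```
+python -c "import mirage; print('mirage installed')"
+```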
-Follow the installation instructions for the given simulator by going into the forked repositories.
+Follow the installation instructions for the robomimic, robosuite, and mimicgen_environments submodules by going into each one and following its README.
+
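+For example, a typical editable install for each submodule might look like the following (the exact steps live in each submodule's README):
+```
+cd robomimic && pip install -e . && cd ..
+cd robosuite && pip install -e . && cd ..
+cd mimicgen_environments && pip install -e . && cd ..
+```
+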
+Build the ROS workspace by running (from the root of this repo):
+```
+cd mirage/mirage/ros_ws
+colcon build
+source install/setup.bash
+```
## Usage
-For robosuite, to run an experiment,
+### Robosuite Benchmark
+If inpainting is enabled, only one benchmarking process can run at a time.
+The ROS launch file depends on the robosuite environment because the camera extrinsics differ between environments.
+First, in one terminal, run the Gazebo writer node:
+```
+source mirage/mirage/ros_ws/install/setup.bash
+ros2 run gazebo_env write_data_node_robosuite_better.py
+```
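+
+You can confirm the writer node is up with the standard ROS 2 CLI:
+```
+ros2 node list
+```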
+
+Then, launch the Gazebo process for the corresponding environment. In a new terminal, first run:
+```
+source mirage/mirage/ros_ws/install/setup.bash
+```
+Then run one of the following, depending on the environment (the three piece assembly launch file actually corresponds to two piece assembly):
+```
+ros2 launch gazebo_env panda_gazebo_classic_robosuite_can.launch.py
+ros2 launch gazebo_env panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py
+ros2 launch gazebo_env panda_gazebo_classic_robosuite_three_piece_assembly.launch.py
```
-cd mirage/benchmark/robosuite
-python3 run_robosuite_benchmark.py --config_file config/example_config.yaml
+
+For robosuite, to run an experiment (from the root of this repo):
+```
+# Run this command only if inpainting is enabled; otherwise skip it
+source mirage/mirage/ros_ws/install/setup.bash
+
+cd mirage/mirage/benchmark/robosuite
+python3 run_robosuite_benchmark.py --config config/example_config.yaml
```
Please take a look at the example config and the different parameters that can be set to run different tasks, agents, and robots. For the above command to work, you must change the agent entries to the paths of your model checkpoints for robosuite.
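As a rough illustration, a config might contain entries along these lines (`use_ros` and `offline_eval` are real fields used by `robosuite_experiment.py`; the agent paths are illustrative placeholders, so consult `config/example_config.yaml` for the actual schema):
```
use_ros: true          # enable the ROS/Gazebo inpainting pipeline
offline_eval: false
# Illustrative only: point these at your trained model checkpoints
source_agent: /path/to/source_robot_checkpoint.pth
target_agent: /path/to/target_robot_checkpoint.pth
```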
## Citation
If you used the benchmark, please consider citing the paper:
```
-TODO: Add link to ArXiv / publication
+@misc{chen2024mirage,
+ title={Mirage: Cross-Embodiment Zero-Shot Policy Transfer with Cross-Painting},
+ author={Lawrence Yunliang Chen and Kush Hari and Karthik Dharmarajan and Chenfeng Xu and Quan Vuong and Ken Goldberg},
+ year={2024},
+ eprint={2402.19249},
+ archivePrefix={arXiv},
+ primaryClass={cs.RO}
+}
```
## Contributing
diff --git a/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_source_robot_server.py b/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_source_robot_server.py
index 026a1df..73176a2 100644
--- a/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_source_robot_server.py
+++ b/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_source_robot_server.py
@@ -155,6 +155,7 @@ def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=Non
self.rollout_horizon = config.experiment.rollout.horizon
self.core_env = self.env.env
+
self.is_diffusion = False
while hasattr(self.core_env, "env"):
self.is_diffusion = True
diff --git a/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_target_robot_client.py b/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_target_robot_client.py
index ebd5d45..d756e0a 100644
--- a/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_target_robot_client.py
+++ b/mirage/mirage/benchmark/robosuite/evaluate_policy_demo_target_robot_client.py
@@ -52,6 +52,17 @@ def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=Non
else:
print("NOT USING ROS! Will use the groundtruth Franka masks instead")
+        self.should_kill_early = False
+
+        import threading
+        # Background thread that lets a human end an infeasible rollout from the keyboard.
+        # daemon=True so a thread blocked on input() does not keep the process alive on exit.
+        self.thread = threading.Thread(target=self.wait_for_skip, daemon=True)
+        self.thread.start()
+
+    def wait_for_skip(self):
+        # Block on stdin; each Enter press flags the current rollout to end early.
+        while True:
+            input()
+            self.should_kill_early = True
+
def image_to_pointcloud(self, depth_map, camera_name, camera_height=84, camera_width=84, segmask=None):
"""
Convert depth image to point cloud
@@ -205,6 +216,14 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
from mirage.infra.ros_inpaint_publisher_sim import ROSInpaintSimData
eef_pose = self.compute_eef_pose()
eef_pose_matrix = T.pose2mat((eef_pose[:3], eef_pose[3:]))
+
+# The Can / Pick and Place task offsets the robot base by -0.1 in y, so the end
+# effector needs a +0.1 y offset to match the ground truth. This needs more investigation.
+from robosuite.environments.manipulation.pick_place import PickPlace
+if isinstance(self.core_env, PickPlace):
+    eef_pose_matrix[1, 3] += 0.1  # adjust the y translation of the 4x4 pose matrix
+
data = ROSInpaintSimData(ros_rgb_img, ros_depth_img, ros_segmentation_mask, eef_pose_matrix, obs['robot0_gripper_qpos'][-1:])
print("Joints including gripper", sent_joint_angles)
self.ros_inpaint_publisher.publish_to_ros_node(data)
@@ -340,6 +359,11 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
if step_i - source_finished_step >= 10:
done = True
+ # If the human hits Enter, end the run early; the rollout isn't feasible
+ if self.should_kill_early:
+ self.should_kill_early = False
+ done = True
+
# tell the source robot that the target robot is ready
# Create an instance of Data() to send to client.
variable = Data()
diff --git a/mirage/mirage/benchmark/robosuite/robosuite_experiment.py b/mirage/mirage/benchmark/robosuite/robosuite_experiment.py
index a7a6389..6912a49 100644
--- a/mirage/mirage/benchmark/robosuite/robosuite_experiment.py
+++ b/mirage/mirage/benchmark/robosuite/robosuite_experiment.py
@@ -98,7 +98,6 @@ def launch(self, override=False) -> None:
target_agent_args.append("--inpaint_enabled")
if self._config.use_ros:
- source_agent_args.append("--use_ros")
target_agent_args.append("--use_ros")
if self._config.offline_eval:
diff --git a/mirage/mirage/gripper_interpolation/robosuite/gripper_interpolator.py b/mirage/mirage/gripper_interpolation/robosuite/gripper_interpolator.py
index 517e70b..84094f7 100644
--- a/mirage/mirage/gripper_interpolation/robosuite/gripper_interpolator.py
+++ b/mirage/mirage/gripper_interpolation/robosuite/gripper_interpolator.py
@@ -50,8 +50,12 @@ def interpolate_gripper(self, gripper_angles: np.array) -> np.array:
"""
if self.source_robot == self.target_robot:
print("Returning angles")
- return 1 * gripper_angles
+ return -1 * gripper_angles
+        if (self.source_robot == "Panda" or self.source_robot == "Kinova3") and self.target_robot == "UR5e":
+            # Linear map from Panda/Kinova3 gripper angles to UR5e gripper angles;
+            # the coefficients appear to be fit offline (cf. the LinearRegression below).
+            # TODO: add this for real gripper mapping
+            return -0.05702400673569841 * gripper_angles + 0.02670973458948458
+
relevant_interpoolator = self.interpolators[(self.source_robot, self.target_robot)]
regression_model = LinearRegression()
diff --git a/mirage/mirage/infra/ros_inpaint_publisher_sim.py b/mirage/mirage/infra/ros_inpaint_publisher_sim.py
index 8651ed9..279cc5c 100644
--- a/mirage/mirage/infra/ros_inpaint_publisher_sim.py
+++ b/mirage/mirage/infra/ros_inpaint_publisher_sim.py
@@ -1,6 +1,6 @@
from input_filenames_msg.msg import InputFilesSimData
from mirage.infra.ros_inpaint_publisher import ROSInpaintPublisher
-from mirage.mirage.gripper_interpolation.gripper_interpolator import GripperInterpolator
+from mirage.gripper_interpolation.robosuite.gripper_interpolator import GripperInterpolator
import numpy as np
class ROSInpaintSimData:
@@ -30,7 +30,7 @@ class ROSInpaintPublisherSim(ROSInpaintPublisher):
to a node that performs inpainting on a target robot.
"""
- def __init__(self, source_robot_info: str, target_robot_info: str):
+ def __init__(self):
"""
Initializes the ROS2 node.
:param source_robot_info: the information about the source robot to determine which interpolation scheme to use
@@ -41,11 +41,8 @@ def __init__(self, source_robot_info: str, target_robot_info: str):
self._publisher = self.node.create_publisher(
InputFilesSimData, '/input_files_data_sim', 1)
- # TODO: generalize this
- self.gripper_interpolator = GripperInterpolator(source_robot_info, target_robot_info, ['/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_no_task_diff.pkl',
- '/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_20_rollouts.pkl'])
- # self.gripper_interpolator = GripperInterpolator('panda', 'panda', ['/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_no_task_diff.pkl'])
- # self.gripper_interpolator = GripperInterpolator('panda', 'ur5', '/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_no_task_diff.pkl')
+ # TODO(kdharmarajandev): generalize this
+ self.gripper_interpolator = GripperInterpolator('Panda', 'Panda')
def publish_to_ros_node(self, data: ROSInpaintSimData):
"""
diff --git a/mirage/mirage/ros_ws/src/README.md b/mirage/mirage/ros_ws/src/README.md
index 22cda73..39867d2 100644
--- a/mirage/mirage/ros_ws/src/README.md
+++ b/mirage/mirage/ros_ws/src/README.md
@@ -1,8 +1,81 @@
-# Cross Embodiment
+# Mirage
-Hi. Welcome to this test workspace for the cross embodiment project. This is currently in development.
+Hi! Welcome to the ROS workspace setup for Mirage. This README details how to set up the ROS 2 environment and Gazebo to run the simulated and real experiments from the Mirage paper (RSS 2024, hopefully ;)
-## Installation
+## Prerequisites
+A computer running Ubuntu 22.04
+
+## Installation (Bash Script)
+Clone the repo:
+```
+cd ~/
+git clone --recurse-submodules git@github.com:BerkeleyAutomation/mirage.git
+```
+
+Run the setup bash script:
+```
+cd ~/mirage/mirage/mirage/ros_ws/src
+bash ros_gazebo_setup.bash
+```
+
+## Installation (Step by Step)
+
+### Step 1: Install ROS2 Humble Full: https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html
+```
+# Step 1.1 Set Locale
+sudo apt update -y
+sudo apt install locales
+sudo locale-gen en_US en_US.UTF-8
+sudo update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
+export LANG=en_US.UTF-8
+
+# Step 1.2 Setup Sources
+sudo apt install software-properties-common
+echo -ne '\n' | sudo add-apt-repository universe
+sudo apt update && sudo apt install curl -y
+sudo curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg
+echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" | sudo tee /etc/apt/sources.list.d/ros2.list > /dev/null
+
+# Step 1.3 Install ROS2 Packages
+sudo apt update -y
+sudo apt upgrade -y
+sudo apt install ros-humble-desktop-full -y
+echo "source /opt/ros/humble/setup.bash" >> ~/.bashrc
+```
+
+### Step 2: Setup ROS2 Control: https://control.ros.org/humble/doc/ros2_control_demos/doc/index.html
+```
+mkdir -p ~/ros2_ws/src
+cd ~/ros2_ws/src
+git clone https://github.com/ros-controls/ros2_control_demos -b humble
+cd ~/ros2_ws/
+sudo apt-get update
+sudo rosdep install --from-paths ./ -i -y --rosdistro ${ROS_DISTRO}
+cd ~/ros2_ws/
+. /opt/ros/${ROS_DISTRO}/setup.sh
+colcon build --merge-install
+```
+
+### Step 3: Tracikpy Installation: https://github.com/mjd3/tracikpy
+```
+sudo apt-get install libeigen3-dev liborocos-kdl-dev libkdl-parser-dev liburdfdom-dev libnlopt-dev
+cd ~/
+git clone https://github.com/mjd3/tracikpy.git
+pip install tracikpy/
+```
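+
+A quick import check (class name as documented in the tracikpy README):
+```
+python -c "from tracikpy import TracIKSolver; print('tracikpy OK')"
+```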
+
+### Step 4: Make sure Gazebo plugins can be found: https://classic.gazebosim.org/tutorials?tut=guided_i5
+```
+echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/mirage/xembody/xembody/src/ros_ws/build/gazebo_env" >> ~/.bashrc
+```
+
+### Step 5: Compile workspace
+```
+cd ~/mirage/mirage/mirage/ros_ws
+colcon build
+. install/setup.bash
+source ~/.bashrc
+```
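+
+If the build succeeded, the workspace packages should now be visible to ROS 2:
+```
+ros2 pkg list | grep gazebo_env
+```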
When doing this installation, have conda fully deactivated
Make sure ROS2 Humble Desktop Full is installed: https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/bringup/config/rrbot_controllers.yaml b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/bringup/config/rrbot_controllers.yaml
deleted file mode 100644
index e98fce3..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/bringup/config/rrbot_controllers.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-controller_manager:
- ros__parameters:
- update_rate: 10 # Hz
-
- joint_state_broadcaster:
- type: joint_state_broadcaster/JointStateBroadcaster
-
- forward_position_controller:
- type: forward_command_controller/ForwardCommandController
-
- joint_trajectory_position_controller:
- type: joint_trajectory_controller/JointTrajectoryController
-
-
-forward_position_controller:
- ros__parameters:
- joints:
- - joint1
- - joint2
- interface_name: position
-
-
-joint_trajectory_position_controller:
- ros__parameters:
- joints:
- - joint1
- - joint2
-
- command_interfaces:
- - position
-
- state_interfaces:
- - position
-
- action_monitor_rate: 20.0 # Defaults to 20
-
- allow_partial_joints_goal: false # Defaults to false
- open_loop_control: true
- allow_integration_in_goal_trajectories: true
- constraints:
- stopped_velocity_tolerance: 0.01 # Defaults to 0.01
- goal_time: 0.0 # Defaults to 0.0 (start immediately)
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/bringup/launch/rrbot.launch.py b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/bringup/launch/rrbot.launch.py
deleted file mode 100644
index ba10319..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/bringup/launch/rrbot.launch.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2021 Stogl Robotics Consulting UG (haftungsbeschränkt)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, RegisterEventHandler
-from launch.conditions import IfCondition
-from launch.event_handlers import OnProcessExit
-from launch.substitutions import Command, FindExecutable, LaunchConfiguration, PathJoinSubstitution
-
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="true",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [
- FindPackageShare("fk_ik_rviz_pkg"),
- "urdf",
- "rrbot.urdf.xacro",
- ]
- ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- robot_controllers = PathJoinSubstitution(
- [
- FindPackageShare("fk_ik_rviz_pkg"),
- "config",
- "rrbot_controllers.yaml",
- ]
- )
- rviz_config_file = PathJoinSubstitution(
- [FindPackageShare("fk_ik_rviz_pkg"), "rviz", "rrbot.rviz"]
- )
-
- control_node = Node(
- package="controller_manager",
- executable="ros2_control_node",
- parameters=[robot_description, robot_controllers],
- output="both",
- )
- robot_state_pub_node = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="both",
- parameters=[robot_description],
- )
- rviz_node = Node(
- package="rviz2",
- executable="rviz2",
- name="rviz2",
- output="log",
- arguments=["-d", rviz_config_file],
- condition=IfCondition(gui),
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- # Delay rviz start after `joint_state_broadcaster`
- delay_rviz_after_joint_state_broadcaster_spawner = RegisterEventHandler(
- event_handler=OnProcessExit(
- target_action=joint_state_broadcaster_spawner,
- on_exit=[rviz_node],
- )
- )
-
- # Delay start of robot_controller after `joint_state_broadcaster`
- delay_robot_controller_spawner_after_joint_state_broadcaster_spawner = RegisterEventHandler(
- event_handler=OnProcessExit(
- target_action=joint_state_broadcaster_spawner,
- on_exit=[robot_controller_spawner],
- )
- )
-
- nodes = [
- control_node,
- robot_state_pub_node,
- joint_state_broadcaster_spawner,
- delay_rviz_after_joint_state_broadcaster_spawner,
- delay_robot_controller_spawner_after_joint_state_broadcaster_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/ros2_control/rrbot.ros2_control.xacro b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/ros2_control/rrbot.ros2_control.xacro
deleted file mode 100644
index 7bcfa48..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/ros2_control/rrbot.ros2_control.xacro
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
-  <xacro:macro name="rrbot_ros2_control" params="name prefix">
-    <ros2_control name="${name}" type="system">
-      <hardware>
-        <plugin>fk_ik_rviz_pkg/RRBotSystemPositionOnlyHardware</plugin>
-        <param name="example_param_hw_start_duration_sec">0</param>
-        <param name="example_param_hw_stop_duration_sec">3.0</param>
-        <param name="example_param_hw_slowdown">100</param>
-      </hardware>
-      <joint name="${prefix}joint1">
-        <command_interface name="position">
-          <param name="min">-1</param>
-          <param name="max">1</param>
-        </command_interface>
-        <state_interface name="position"/>
-      </joint>
-      <joint name="${prefix}joint2">
-        <command_interface name="position">
-          <param name="min">-1</param>
-          <param name="max">1</param>
-        </command_interface>
-        <state_interface name="position"/>
-      </joint>
-    </ros2_control>
-  </xacro:macro>
-</robot>
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/rviz/rrbot.rviz b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/rviz/rrbot.rviz
deleted file mode 100644
index 8b2b25b..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/rviz/rrbot.rviz
+++ /dev/null
@@ -1,196 +0,0 @@
-Panels:
- - Class: rviz_common/Displays
- Help Height: 138
- Name: Displays
- Property Tree Widget:
- Expanded:
- - /TF1
- Splitter Ratio: 0.5
- Tree Height: 975
- - Class: rviz_common/Selection
- Name: Selection
- - Class: rviz_common/Tool Properties
- Expanded:
- - /2D Goal Pose1
- - /Publish Point1
- Name: Tool Properties
- Splitter Ratio: 0.5886790156364441
- - Class: rviz_common/Views
- Expanded:
- - /Current View1
- Name: Views
- Splitter Ratio: 0.5
-Visualization Manager:
- Class: ""
- Displays:
- - Alpha: 0.5
- Cell Size: 1
- Class: rviz_default_plugins/Grid
- Color: 160; 160; 164
- Enabled: true
- Line Style:
- Line Width: 0.029999999329447746
- Value: Lines
- Name: Grid
- Normal Cell Count: 0
- Offset:
- X: 0
- Y: 0
- Z: 0
- Plane: XY
- Plane Cell Count: 10
- Reference Frame:
- Value: true
- - Alpha: 1
- Class: rviz_default_plugins/RobotModel
- Collision Enabled: false
- Description File: ""
- Description Source: Topic
- Description Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: /robot_description
- Enabled: true
- Links:
- All Links Enabled: true
- Expand Joint Details: false
- Expand Link Details: false
- Expand Tree: false
- Link Tree Style: Links in Alphabetic Order
- base_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- link1:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- link2:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- world:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Mass Properties:
- Inertia: false
- Mass: false
- Name: RobotModel
- TF Prefix: ""
- Update Interval: 0
- Value: true
- Visual Enabled: true
- - Class: rviz_default_plugins/TF
- Enabled: true
- Frame Timeout: 15
- Frames:
- All Enabled: true
- base_link:
- Value: true
- link1:
- Value: true
- link2:
- Value: true
- world:
- Value: true
- Marker Scale: 1
- Name: TF
- Show Arrows: true
- Show Axes: true
- Show Names: true
- Tree:
- world:
- base_link:
- link1:
- link2:
- {}
- Update Interval: 0
- Value: true
- Enabled: true
- Global Options:
- Background Color: 48; 48; 48
- Fixed Frame: base_link
- Frame Rate: 30
- Name: root
- Tools:
- - Class: rviz_default_plugins/Interact
- Hide Inactive Objects: true
- - Class: rviz_default_plugins/MoveCamera
- - Class: rviz_default_plugins/Select
- - Class: rviz_default_plugins/FocusCamera
- - Class: rviz_default_plugins/Measure
- Line color: 128; 128; 0
- - Class: rviz_default_plugins/SetInitialPose
- Covariance x: 0.25
- Covariance y: 0.25
- Covariance yaw: 0.06853891909122467
- Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: /initialpose
- - Class: rviz_default_plugins/SetGoal
- Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: /goal_pose
- - Class: rviz_default_plugins/PublishPoint
- Single click: true
- Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: /clicked_point
- Transformation:
- Current:
- Class: rviz_default_plugins/TF
- Value: true
- Views:
- Current:
- Class: rviz_default_plugins/Orbit
- Distance: 8.443930625915527
- Enable Stereo Rendering:
- Stereo Eye Separation: 0.05999999865889549
- Stereo Focal Distance: 1
- Swap Stereo Eyes: false
- Value: false
- Focal Point:
- X: 0.0044944556429982185
- Y: 1.0785865783691406
- Z: 2.4839563369750977
- Focal Shape Fixed Size: true
- Focal Shape Size: 0.05000000074505806
- Invert Z Axis: false
- Name: Current View
- Near Clip Distance: 0.009999999776482582
- Pitch: 0.17039914429187775
- Target Frame:
- Value: Orbit (rviz)
- Yaw: 0.9472349286079407
- Saved: ~
-Window Geometry:
- Displays:
- collapsed: false
- Height: 1379
- Hide Left Dock: false
- Hide Right Dock: false
- QMainWindow State: 000000ff00000000fd0000000400000000000001f7000004c7fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b000000b000fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000006e000004c70000018200fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000015f000004c7fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000006e000004c70000013200fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d0065010000000000000450000000000000000000000692000004c700000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
- Selection:
- collapsed: false
- Tool Properties:
- collapsed: false
- Views:
- collapsed: false
- Width: 2560
- X: 585
- Y: 358
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot.materials.xacro b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot.materials.xacro
deleted file mode 100644
index eb8e021..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot.materials.xacro
+++ /dev/null
@@ -1,44 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot.urdf.xacro b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot.urdf.xacro
deleted file mode 100644
index 007540b..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot.urdf.xacro
+++ /dev/null
@@ -1,29 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot_description.urdf.xacro b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot_description.urdf.xacro
deleted file mode 100644
index 220c2c8..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/description/urdf/rrbot_description.urdf.xacro
+++ /dev/null
@@ -1,127 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/fk_ik_rviz_pkg.xml b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/fk_ik_rviz_pkg.xml
deleted file mode 100644
index c9653a2..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/fk_ik_rviz_pkg.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<library path="fk_ik_rviz_pkg">
-  <class name="fk_ik_rviz_pkg/RRBotSystemPositionOnlyHardware"
-         type="fk_ik_rviz_pkg::RRBotSystemPositionOnlyHardware"
-         base_class_type="hardware_interface::SystemInterface">
-    <description>
-      The ros2_control RRbot example using a system hardware interface-type.
-    </description>
-  </class>
-</library>
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/fk_ik_rviz_pkg/__init__.py b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/fk_ik_rviz_pkg/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/fk_ik_rviz_pkg/module_to_import.py b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/fk_ik_rviz_pkg/module_to_import.py
deleted file mode 100644
index e69de29..0000000
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/include/fk_ik_rviz_pkg/rrbot.hpp b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/include/fk_ik_rviz_pkg/rrbot.hpp
deleted file mode 100644
index c168553..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/include/fk_ik_rviz_pkg/rrbot.hpp
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2020 ros2_control Development Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef FK_IK_RVIZ_PKG__RRBOT_HPP_
-#define FK_IK_RVIZ_PKG__RRBOT_HPP_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "hardware_interface/handle.hpp"
-#include "hardware_interface/hardware_info.hpp"
-#include "hardware_interface/system_interface.hpp"
-#include "hardware_interface/types/hardware_interface_return_values.hpp"
-#include "rclcpp/macros.hpp"
-#include "rclcpp_lifecycle/node_interfaces/lifecycle_node_interface.hpp"
-#include "rclcpp_lifecycle/state.hpp"
-#include "fk_ik_rviz_pkg/visibility_control.h"
-
-namespace fk_ik_rviz_pkg
-{
-class RRBotSystemPositionOnlyHardware : public hardware_interface::SystemInterface
-{
-public:
- RCLCPP_SHARED_PTR_DEFINITIONS(RRBotSystemPositionOnlyHardware);
-
- FK_IK_RVIZ_PKG_PUBLIC
- hardware_interface::CallbackReturn on_init(
- const hardware_interface::HardwareInfo & info) override;
-
- FK_IK_RVIZ_PKG_PUBLIC
- hardware_interface::CallbackReturn on_configure(
- const rclcpp_lifecycle::State & previous_state) override;
-
- FK_IK_RVIZ_PKG_PUBLIC
- std::vector<hardware_interface::StateInterface> export_state_interfaces() override;
-
- FK_IK_RVIZ_PKG_PUBLIC
- std::vector<hardware_interface::CommandInterface> export_command_interfaces() override;
-
- FK_IK_RVIZ_PKG_PUBLIC
- hardware_interface::CallbackReturn on_activate(
- const rclcpp_lifecycle::State & previous_state) override;
-
- FK_IK_RVIZ_PKG_PUBLIC
- hardware_interface::CallbackReturn on_deactivate(
- const rclcpp_lifecycle::State & previous_state) override;
-
- FK_IK_RVIZ_PKG_PUBLIC
- hardware_interface::return_type read(
- const rclcpp::Time & time, const rclcpp::Duration & period) override;
-
- FK_IK_RVIZ_PKG_PUBLIC
- hardware_interface::return_type write(
- const rclcpp::Time & time, const rclcpp::Duration & period) override;
-
-private:
- // Parameters for the RRBot simulation
- double hw_start_sec_;
- double hw_stop_sec_;
- double hw_slowdown_;
-
- // Store the command for the simulated robot
- std::vector<double> hw_commands_;
- std::vector<double> hw_states_;
-};
-
-} // namespace fk_ik_rviz_pkg
-
-#endif // fk_ik_rviz_pkg__RRBOT_HPP_
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/include/fk_ik_rviz_pkg/visibility_control.h b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/include/fk_ik_rviz_pkg/visibility_control.h
deleted file mode 100644
index ff3f6ea..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/include/fk_ik_rviz_pkg/visibility_control.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2021 ros2_control Development Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/* This header must be included by all rclcpp headers which declare symbols
- * which are defined in the rclcpp library. When not building the rclcpp
- * library, i.e. when using the headers in other package's code, the contents
- * of this header change the visibility of certain symbols which the rclcpp
- * library cannot have, but the consuming code must have in order to link.
- */
-
-#ifndef FK_IK_RVIZ_PKG__VISIBILITY_CONTROL_H_
-#define FK_IK_RVIZ_PKG__VISIBILITY_CONTROL_H_
-
-// This logic was borrowed (then namespaced) from the examples on the gcc wiki:
-// https://gcc.gnu.org/wiki/Visibility
-
-#if defined _WIN32 || defined __CYGWIN__
-#ifdef __GNUC__
-#define FK_IK_RVIZ_PKG_EXPORT __attribute__((dllexport))
-#define FK_IK_RVIZ_PKG_IMPORT __attribute__((dllimport))
-#else
-#define FK_IK_RVIZ_PKG_EXPORT __declspec(dllexport)
-#define FK_IK_RVIZ_PKG_IMPORT __declspec(dllimport)
-#endif
-#ifdef FK_IK_RVIZ_PKG_BUILDING_DLL
-#define FK_IK_RVIZ_PKG_PUBLIC FK_IK_RVIZ_PKG_EXPORT
-#else
-#define FK_IK_RVIZ_PKG_PUBLIC FK_IK_RVIZ_PKG_IMPORT
-#endif
-#define FK_IK_RVIZ_PKG_PUBLIC_TYPE FK_IK_RVIZ_PKG_PUBLIC
-#define FK_IK_RVIZ_PKG_LOCAL
-#else
-#define FK_IK_RVIZ_PKG_EXPORT __attribute__((visibility("default")))
-#define FK_IK_RVIZ_PKG_IMPORT
-#if __GNUC__ >= 4
-#define FK_IK_RVIZ_PKG_PUBLIC __attribute__((visibility("default")))
-#define FK_IK_RVIZ_PKG_LOCAL __attribute__((visibility("hidden")))
-#else
-#define FK_IK_RVIZ_PKG_PUBLIC
-#define FK_IK_RVIZ_PKG_LOCAL
-#endif
-#define FK_IK_RVIZ_PKG_PUBLIC_TYPE
-#endif
-
-#endif // FK_IK_RVIZ_PKG__VISIBILITY_CONTROL_H_
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/rrbot.cpp b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/rrbot.cpp
deleted file mode 100644
index 3dd7e98..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/hardware/rrbot.cpp
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2020 ros2_control Development Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fk_ik_rviz_pkg/rrbot.hpp"
-
-#include <chrono>
-#include <cmath>
-#include <limits>
-#include <memory>
-#include <vector>
-
-#include "hardware_interface/types/hardware_interface_type_values.hpp"
-#include "rclcpp/rclcpp.hpp"
-
-namespace fk_ik_rviz_pkg
-{
-hardware_interface::CallbackReturn RRBotSystemPositionOnlyHardware::on_init(
- const hardware_interface::HardwareInfo & info)
-{
- if (
- hardware_interface::SystemInterface::on_init(info) !=
- hardware_interface::CallbackReturn::SUCCESS)
- {
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- hw_start_sec_ = stod(info_.hardware_parameters["example_param_hw_start_duration_sec"]);
- hw_stop_sec_ = stod(info_.hardware_parameters["example_param_hw_stop_duration_sec"]);
- hw_slowdown_ = stod(info_.hardware_parameters["example_param_hw_slowdown"]);
- // END: This part here is for exemplary purposes - Please do not copy to your production code
- hw_states_.resize(info_.joints.size(), std::numeric_limits<double>::quiet_NaN());
- hw_commands_.resize(info_.joints.size(), std::numeric_limits<double>::quiet_NaN());
-
- for (const hardware_interface::ComponentInfo & joint : info_.joints)
- {
- // RRBotSystemPositionOnly has exactly one state and command interface on each joint
- if (joint.command_interfaces.size() != 1)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"),
- "Joint '%s' has %zu command interfaces found. 1 expected.", joint.name.c_str(),
- joint.command_interfaces.size());
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- if (joint.command_interfaces[0].name != hardware_interface::HW_IF_POSITION)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"),
- "Joint '%s' have %s command interfaces found. '%s' expected.", joint.name.c_str(),
- joint.command_interfaces[0].name.c_str(), hardware_interface::HW_IF_POSITION);
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- if (joint.state_interfaces.size() != 1)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"),
- "Joint '%s' has %zu state interface. 1 expected.", joint.name.c_str(),
- joint.state_interfaces.size());
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- if (joint.state_interfaces[0].name != hardware_interface::HW_IF_POSITION)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"),
- "Joint '%s' have %s state interface. '%s' expected.", joint.name.c_str(),
- joint.state_interfaces[0].name.c_str(), hardware_interface::HW_IF_POSITION);
- return hardware_interface::CallbackReturn::ERROR;
- }
- }
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-hardware_interface::CallbackReturn RRBotSystemPositionOnlyHardware::on_configure(
- const rclcpp_lifecycle::State & /*previous_state*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Configuring ...please wait...");
-
- for (int i = 0; i < hw_start_sec_; i++)
- {
- rclcpp::sleep_for(std::chrono::seconds(1));
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "%.1f seconds left...",
- hw_start_sec_ - i);
- }
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- // reset values always when configuring hardware
- for (uint i = 0; i < hw_states_.size(); i++)
- {
- hw_states_[i] = 0;
- hw_commands_[i] = 0;
- }
-
- RCLCPP_INFO(rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Successfully configured!");
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-std::vector<hardware_interface::StateInterface>
-RRBotSystemPositionOnlyHardware::export_state_interfaces()
-{
- std::vector<hardware_interface::StateInterface> state_interfaces;
- for (uint i = 0; i < info_.joints.size(); i++)
- {
- state_interfaces.emplace_back(hardware_interface::StateInterface(
- info_.joints[i].name, hardware_interface::HW_IF_POSITION, &hw_states_[i]));
- }
-
- return state_interfaces;
-}
-
-std::vector<hardware_interface::CommandInterface>
-RRBotSystemPositionOnlyHardware::export_command_interfaces()
-{
- std::vector<hardware_interface::CommandInterface> command_interfaces;
- for (uint i = 0; i < info_.joints.size(); i++)
- {
- command_interfaces.emplace_back(hardware_interface::CommandInterface(
- info_.joints[i].name, hardware_interface::HW_IF_POSITION, &hw_commands_[i]));
- }
-
- return command_interfaces;
-}
-
-hardware_interface::CallbackReturn RRBotSystemPositionOnlyHardware::on_activate(
- const rclcpp_lifecycle::State & /*previous_state*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Activating ...please wait...");
-
- for (int i = 0; i < hw_start_sec_; i++)
- {
- rclcpp::sleep_for(std::chrono::seconds(1));
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "%.1f seconds left...",
- hw_start_sec_ - i);
- }
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- // command and state should be equal when starting
- for (uint i = 0; i < hw_states_.size(); i++)
- {
- hw_commands_[i] = hw_states_[i];
- }
-
- RCLCPP_INFO(rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Successfully activated!");
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-hardware_interface::CallbackReturn RRBotSystemPositionOnlyHardware::on_deactivate(
- const rclcpp_lifecycle::State & /*previous_state*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Deactivating ...please wait...");
-
- for (int i = 0; i < hw_stop_sec_; i++)
- {
- rclcpp::sleep_for(std::chrono::seconds(1));
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "%.1f seconds left...",
- hw_stop_sec_ - i);
- }
-
- RCLCPP_INFO(rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Successfully deactivated!");
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-hardware_interface::return_type RRBotSystemPositionOnlyHardware::read(
- const rclcpp::Time & /*time*/, const rclcpp::Duration & /*period*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Reading...");
-
- for (uint i = 0; i < hw_states_.size(); i++)
- {
- // Simulate RRBot's movement
- hw_states_[i] = hw_states_[i] + (hw_commands_[i] - hw_states_[i]) / hw_slowdown_;
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Got state %.5f for joint %d!",
- hw_states_[i], i);
- }
- RCLCPP_INFO(rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Joints successfully read!");
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- return hardware_interface::return_type::OK;
-}
-
-hardware_interface::return_type RRBotSystemPositionOnlyHardware::write(
- const rclcpp::Time & /*time*/, const rclcpp::Duration & /*period*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Writing...");
-
- for (uint i = 0; i < hw_commands_.size(); i++)
- {
- // Simulate sending commands to the hardware
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Got command %.5f for joint %d!",
- hw_commands_[i], i);
- }
- RCLCPP_INFO(
- rclcpp::get_logger("RRBotSystemPositionOnlyHardware"), "Joints successfully written!");
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- return hardware_interface::return_type::OK;
-}
-
-} // namespace fk_ik_rviz_pkg
-
-#include "pluginlib/class_list_macros.hpp"
-
-PLUGINLIB_EXPORT_CLASS(
- fk_ik_rviz_pkg::RRBotSystemPositionOnlyHardware, hardware_interface::SystemInterface)
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/package.xml b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/package.xml
deleted file mode 100644
index 3b8a258..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/package.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<package format="3">
-  <name>fk_ik_rviz_pkg</name>
-  <version>0.0.0</version>
-  <description>Demo package of `ros2_control` hardware for RRbot.</description>
-
-  <maintainer email="">Dr.-Ing. Denis Štogl</maintainer>
-  <maintainer email="">Bence Magyar</maintainer>
-  <maintainer email="">Christoph Froehlich</maintainer>
-
-  <license>Apache-2.0</license>
-
-  <buildtool_depend>ament_cmake</buildtool_depend>
-
-  <depend>hardware_interface</depend>
-  <depend>pluginlib</depend>
-  <depend>rclcpp</depend>
-  <depend>rclcpp_lifecycle</depend>
-
-  <exec_depend>controller_manager</exec_depend>
-  <exec_depend>forward_command_controller</exec_depend>
-  <exec_depend>joint_state_broadcaster</exec_depend>
-  <exec_depend>joint_state_publisher_gui</exec_depend>
-  <exec_depend>joint_trajectory_controller</exec_depend>
-  <exec_depend>robot_state_publisher</exec_depend>
-  <exec_depend>ros2_controllers_test_nodes</exec_depend>
-  <exec_depend>ros2controlcli</exec_depend>
-  <exec_depend>ros2launch</exec_depend>
-  <exec_depend>rviz2</exec_depend>
-  <exec_depend>xacro</exec_depend>
-
-  <test_depend>ament_cmake_gtest</test_depend>
-
-  <export>
-    <build_type>ament_cmake</build_type>
-  </export>
-</package>
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/baxter.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/baxter.urdf
deleted file mode 100644
index e90262a..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/baxter.urdf
+++ /dev/null
@@ -1,1849 +0,0 @@
-<!-- baxter.urdf (1849 lines): full Baxter robot description. Surviving fragments show
-     Gazebo contact parameters (mu 1000, kp 1e5), SimpleTransmission/EffortJointInterface
-     transmissions, per-joint velocity-command plugins for the left and right arm joints
-     (left_s0 ... right_w2, topics such as left_s0_cmd_vel), wrist cameras (1.047 rad FOV,
-     640x400 @ 30 fps, topics left_arm/image and right_arm/image), and IR range sensors
-     (topics left_arm/range and right_arm/range). XML markup lost in extraction. -->
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/forward_test.py b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/forward_test.py
deleted file mode 100644
index 4bc1724..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/forward_test.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import compas_fab
-from compas.robots import Configuration
-from compas_fab.backends import PyBulletClient
-import pathlib
-import time
-
-with PyBulletClient() as client:
- # TODO: Change hardcoded Python file
- file_path = pathlib.Path(__file__).parent.resolve()
- urdf_filename = compas_fab.get('../../../../../../cross_embodiment_test_ws/src/fk_ik_rviz_pkg/scripts/ur5e.urdf')
- robot = client.load_robot(urdf_filename)
-
- configuration = Configuration.from_revolute_values([0,0,0,0,0,0,0])
-
- frame_WCF = robot.forward_kinematics(configuration)
-
- # Given 4x4 matrix, it is 4th column (position), first column, and then second column
- print("Frame in the world coordinate system")
- print(frame_WCF)
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/inverse_test.py b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/inverse_test.py
deleted file mode 100644
index 6aa347b..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/inverse_test.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import compas_fab
-from compas.geometry import Frame
-from compas_fab.backends import PyBulletClient
-
-with PyBulletClient() as client:
- urdf_filename = compas_fab.get('../../../../../../cross_embodiment_test_ws/src/fk_ik_rviz_pkg/scripts/panda.urdf')
- robot = client.load_robot(urdf_filename)
-
- # Given 4x4 matrix, it is 4th column (position), first column, and then second column
- frame_WCF = Frame([0.088, 0.000, 0.926], [1,0,0], [0, -1,0])
- start_configuration = robot.zero_configuration()
-
- configuration = robot.inverse_kinematics(frame_WCF, start_configuration)
-
- print("Found configuration", configuration)
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/kinova.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/kinova.urdf
deleted file mode 100644
index 3d1f73b..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/kinova.urdf
+++ /dev/null
@@ -1,533 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/panda.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/panda.urdf
deleted file mode 100644
index 50153fa..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/panda.urdf
+++ /dev/null
@@ -1,306 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/panda_arm_hand.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/panda_arm_hand.urdf
deleted file mode 100644
index 824e988..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/panda_arm_hand.urdf
+++ /dev/null
@@ -1,286 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/__MACOSX/pybullet_ur5_robotiq/._.DS_Store b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/__MACOSX/pybullet_ur5_robotiq/._.DS_Store
deleted file mode 100644
index a5b28df..0000000
Binary files a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/__MACOSX/pybullet_ur5_robotiq/._.DS_Store and /dev/null differ
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/pybullet_ur5_robotiq/.DS_Store b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/pybullet_ur5_robotiq/.DS_Store
deleted file mode 100644
index 78ea35f..0000000
Binary files a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/pybullet_ur5_robotiq/.DS_Store and /dev/null differ
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/pybullet_ur5_robotiq/urdf/ur5_robotiq_85.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/pybullet_ur5_robotiq/urdf/ur5_robotiq_85.urdf
deleted file mode 100644
index efffa54..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/pybullet_ur5_robotiq/pybullet_ur5_robotiq/urdf/ur5_robotiq_85.urdf
+++ /dev/null
@@ -1,598 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/test_model.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/test_model.urdf
deleted file mode 100644
index bfc31d8..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/test_model.urdf
+++ /dev/null
@@ -1,124 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e.urdf
deleted file mode 100644
index 3d235ae..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e.urdf
+++ /dev/null
@@ -1,478 +0,0 @@
-<!-- ur5e.urdf (478 lines): UR5e description with a gazebo_ros2_control/GazeboSystem
-     plugin, per-joint position command interfaces limited to roughly +/-2*pi (+/-pi for
-     one joint), a hardcoded controller config path
-     /home/kushtimusprime/cross_embodiment_test_ws/install/gazebo_env/share/gazebo_env/config/urbot_controllers.yaml,
-     and Gazebo friction/material settings (mu 0.2, Gazebo/Orange, Gazebo/Yellow).
-     XML markup lost in extraction. -->
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/.DS_Store b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/.DS_Store
deleted file mode 100644
index ebb2551..0000000
Binary files a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/.DS_Store and /dev/null differ
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/.DS_Store b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/.DS_Store
deleted file mode 100644
index 5008ddf..0000000
Binary files a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/.DS_Store and /dev/null differ
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/robotiq_2f_85_gripper.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/robotiq_2f_85_gripper.urdf
deleted file mode 100644
index 850ea2d..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/robotiq_2f_85_gripper.urdf
+++ /dev/null
@@ -1,328 +0,0 @@
-<!-- robotiq_2f_85_gripper.urdf (328 lines): Robotiq 2F-85 gripper description with a
-     SimpleTransmission on a PositionJointInterface. XML markup lost in extraction. -->
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e.urdf
deleted file mode 100644
index 7a63ece..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e.urdf
+++ /dev/null
@@ -1,307 +0,0 @@
- [307 lines of URDF XML deleted; markup stripped during extraction. Surviving text nodes: six transmission blocks (transmission_interface/SimpleTransmission, hardware_interface/PositionJointInterface, reduction 1) and seven boolean flags set to true]
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_camera.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_camera.urdf
deleted file mode 100644
index efefe82..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_camera.urdf
+++ /dev/null
@@ -1,330 +0,0 @@
- [330 lines of URDF XML deleted; markup stripped during extraction. Surviving text: the inline comments "Change this to 0.01 0.01 0.01 to get rid of box!" and "Change this to 0.01 0 0 to get rid of box!", plus six transmission blocks (transmission_interface/SimpleTransmission, hardware_interface/PositionJointInterface, reduction 1)]
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_gripper.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_gripper.urdf
deleted file mode 100644
index e0e7fe8..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_gripper.urdf
+++ /dev/null
@@ -1,574 +0,0 @@
- [574 lines of URDF XML deleted; markup stripped during extraction. Surviving text nodes: six transmission blocks (transmission_interface/SimpleTransmission, hardware_interface/PositionJointInterface, reduction 1) and seven boolean flags set to true]
diff --git a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_robotiq_gripper.urdf b/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_robotiq_gripper.urdf
deleted file mode 100644
index f9261bd..0000000
--- a/mirage/mirage/ros_ws/src/fk_ik_rviz_pkg/scripts/ur5e_description/robots/ur5e_with_robotiq_gripper.urdf
+++ /dev/null
@@ -1,650 +0,0 @@
- [650 lines of URDF XML deleted; markup stripped during extraction. Surviving text nodes: six arm transmission blocks (transmission_interface/SimpleTransmission, hardware_interface/PositionJointInterface, reduction 1), one additional transmission (transmission_interface/SimpleTransmission, PositionJointInterface, reduction 1), and seven boolean flags set to true]
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/config/basic_controller.yaml b/mirage/mirage/ros_ws/src/gazebo_env/bringup/config/basic_controller.yaml
deleted file mode 100644
index 8fbf9f9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/config/basic_controller.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-controller_manager:
- ros__parameters:
- update_rate: 10 # Hz
-
- joint_state_broadcaster:
- type: joint_state_broadcaster/JointStateBroadcaster
\ No newline at end of file
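For context, the deleted basic_controller.yaml only configured the stock joint_state_broadcaster/JointStateBroadcaster, which publishes the hardware joint positions on /joint_states at the configured update_rate (10 Hz here) and can be inspected with `ros2 topic echo /joint_states`.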
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/config/urbot_controllers.yaml b/mirage/mirage/ros_ws/src/gazebo_env/bringup/config/urbot_controllers.yaml
deleted file mode 100644
index 760ba78..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/config/urbot_controllers.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-controller_manager:
- ros__parameters:
- update_rate: 10 # Hz
-
- joint_state_broadcaster:
- type: joint_state_broadcaster/JointStateBroadcaster
-
- forward_position_controller:
- type: forward_command_controller/ForwardCommandController
-
-forward_position_controller:
- ros__parameters:
- joints:
- - shoulder_pan_joint
- - shoulder_lift_joint
- - elbow_joint
- - wrist_1_joint
- - wrist_2_joint
- - wrist_3_joint
- interface_name: position
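The deleted urbot_controllers.yaml wired a forward_command_controller/ForwardCommandController across the six UR arm joints. Such a controller accepts a std_msgs/msg/Float64MultiArray on <controller_name>/commands, one value per configured joint; a minimal rclpy sketch (node name and joint values are illustrative, and it assumes the controller was spawned and active):
```
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64MultiArray

# Publish one position per joint, in the order listed in the controller YAML.
rclpy.init()
node = Node("forward_position_demo")
pub = node.create_publisher(
    Float64MultiArray, "/forward_position_controller/commands", 10
)
msg = Float64MultiArray()
msg.data = [0.0, -1.57, 1.57, -1.57, -1.57, 0.0]  # illustrative joint targets
pub.publish(msg)
rclpy.spin_once(node, timeout_sec=0.5)  # give the message time to go out
node.destroy_node()
rclpy.shutdown()
```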
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/full_ur5e.launch.xml b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/full_ur5e.launch.xml
deleted file mode 100644
index d49df0d..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/full_ur5e.launch.xml
+++ /dev/null
@@ -1,4 +0,0 @@
- [4 lines of launch XML deleted; markup stripped during extraction and nothing recoverable remains]
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/load_world_into_gazebo.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/load_world_into_gazebo.launch.py
deleted file mode 100644
index 069bbd6..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/load_world_into_gazebo.launch.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Author: Addison Sears-Collins
-# Date: September 19, 2021
-# Description: Load a world file into Gazebo.
-# https://automaticaddison.com
-
-import os
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition, UnlessCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, LaunchConfiguration, PythonExpression
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-def generate_launch_description():
-
- # Set the path to the Gazebo ROS package
- pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros')
-
- # Set the path to this package.
- pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env')
-
- # Set the path to the world file
- world_file_name = 'no_shadow.world'
- world_path = os.path.join(pkg_share, 'worlds', world_file_name)
-
- # Set the path to the SDF model files.
- gazebo_models_path = os.path.join(pkg_share, 'models')
- os.environ["GAZEBO_MODEL_PATH"] = gazebo_models_path
-
- ########### YOU DO NOT NEED TO CHANGE ANYTHING BELOW THIS LINE ##############
- # Launch configuration variables specific to simulation
- world = LaunchConfiguration('world')
-
- declare_world_cmd = DeclareLaunchArgument(
- name='world',
- default_value=world_path,
- description='Full path to the world model file to load')
-
- # Specify the actions
-
- # Start Gazebo server
- start_gazebo_server_cmd = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
- launch_arguments={'world': world}.items())
-
- # Start Gazebo client
- start_gazebo_client_cmd = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzclient.launch.py')),)
-
- # Create the launch description and populate
- ld = LaunchDescription()
-
- # Declare the launch options
- ld.add_action(declare_world_cmd)
-
- # Add any actions
- ld.add_action(start_gazebo_server_cmd)
- ld.add_action(start_gazebo_client_cmd)
-
- return ld
\ No newline at end of file
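Usage note: before its deletion this file loaded a standalone world (defaulting to no_shadow.world) and exported GAZEBO_MODEL_PATH so the package's SDF models would resolve; it was presumably invoked as `ros2 launch gazebo_env load_world_into_gazebo.launch.py world:=/path/to/custom.world`.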
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_gazebo_classic_real.launch.py
deleted file mode 100644
index ffb57e6..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_gazebo_classic_real.launch.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-import os
-
-def generate_launch_description():
-
- # Set the path to the Gazebo ROS package
- pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros')
-
- # Set the path to this package.
- pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env')
-
- # Set the path to the world file
- world_file_name = 'no_shadow.world'
- world_path = os.path.join(pkg_share, 'worlds', world_file_name)
-
- world = LaunchConfiguration('world')
-
- declare_world_cmd = DeclareLaunchArgument(
- name='world',
- default_value=world_path,
- description='Full path to the world model file to load')
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- declared_arguments.append(declare_world_cmd)
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
- launch_arguments={'world': world}.items())
-
- #gazebo_server = IncludeLaunchDescription(
- # PythonLaunchDescriptionSource(
- # [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])],
- # launch_arguments={"world": world}.items(),
- # )
- #)
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "panda_and_panda_grippers_camera_scene_real.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="panda_and_panda_grippers_joint_state_publisher_node.py",
- output="screen"
- )
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "ur5_camera_link",
- "ur5_real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "panda_camera_link",
- "panda_real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_early_inpaint_classic_real.launch.py
similarity index 82%
rename from mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_classic_real.launch.py
rename to mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_early_inpaint_classic_real.launch.py
index c50f9e2..b23e1df 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_classic_real.launch.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_early_inpaint_classic_real.launch.py
@@ -77,7 +77,7 @@ def generate_launch_description():
PathJoinSubstitution([FindExecutable(name="xacro")]),
" ",
PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "panda_and_panda_grippers_multi_camera_scene_real.urdf.xacro"]
+ [FindPackageShare("gazebo_env"), "urdf", "panda_and_panda_grippers_multi_camera_scene_early_inpaint_real.urdf.xacro"]
),
" ",
"use_gazebo_classic:=true",
@@ -104,30 +104,12 @@ def generate_launch_description():
output="screen",
)
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
joint_state_publisher_node = Node(
package="gazebo_env",
executable="panda_and_panda_grippers_joint_state_publisher_node.py",
output="screen"
)
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
nodes = [
gazebo_server,
gazebo_client,
@@ -222,9 +204,50 @@ def generate_launch_description():
# "camera_link",
],
),
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_right_camera_link",
+ "panda_no_gripper_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_left_camera_link",
+ "panda_no_gripper_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ )
]
return LaunchDescription(declared_arguments + nodes)
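Every static_transform_publisher node added in this hunk (and repeated in the new launch files below) publishes a zero translation with yaw/pitch/roll of (-1.571, 0.0, -1.571), the standard camera-link-to-optical-frame rotation. A hedged refactoring sketch that would collapse the repetition; the helper name is an assumption, not something in the repo:
```
from launch_ros.actions import Node

def optical_frame_tf(parent: str, child: str) -> Node:
    # static_transform_publisher's legacy argument order is
    # x y z yaw pitch roll parent_frame child_frame; (-pi/2, 0, -pi/2)
    # rotates a Gazebo camera link into the ROS optical convention
    # (z forward, x right, y down).
    return Node(
        package="tf2_ros",
        executable="static_transform_publisher",
        arguments=["0.0", "0.0", "0.0", "-1.571", "0.0", "-1.571",
                   parent, child],
    )

# e.g. optical_frame_tf("panda_no_gripper_left_camera_link",
#                       "panda_no_gripper_left_real_camera_link")
```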
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_early_inpaint_classic_wrist_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_early_inpaint_classic_wrist_real.launch.py
new file mode 100644
index 0000000..a8913f0
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_multi_gazebo_early_inpaint_classic_wrist_real.launch.py
@@ -0,0 +1,253 @@
+# Copyright 2021 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.conditions import IfCondition
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
+from launch_ros.actions import Node
+from launch_ros.substitutions import FindPackageShare
+import os
+
+def generate_launch_description():
+
+ # Set the path to the Gazebo ROS package
+ pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros')
+
+ # Set the path to this package.
+ pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env')
+
+ # Set the path to the world file
+ world_file_name = 'no_shadow_wrist.world'
+ world_path = os.path.join(pkg_share, 'worlds', world_file_name)
+
+ world = LaunchConfiguration('world')
+
+ declare_world_cmd = DeclareLaunchArgument(
+ name='world',
+ default_value=world_path,
+ description='Full path to the world model file to load')
+
+ # Declare arguments
+ declared_arguments = []
+ declared_arguments.append(
+ DeclareLaunchArgument(
+ "gui",
+ default_value="false",
+ description="Start RViz2 automatically with this launch file.",
+ )
+ )
+
+ declared_arguments.append(declare_world_cmd)
+
+ # Initialize Arguments
+ gui = LaunchConfiguration("gui")
+
+ gazebo_server = IncludeLaunchDescription(
+ PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
+ launch_arguments={'world': world}.items())
+
+ #gazebo_server = IncludeLaunchDescription(
+ # PythonLaunchDescriptionSource(
+ # [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])],
+ # launch_arguments={"world": world}.items(),
+ # )
+ #)
+ gazebo_client = IncludeLaunchDescription(
+ PythonLaunchDescriptionSource(
+ [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
+ )
+ )
+
+ # Get URDF via xacro
+ robot_description_content = Command(
+ [
+ PathJoinSubstitution([FindExecutable(name="xacro")]),
+ " ",
+ PathJoinSubstitution(
+ [FindPackageShare("gazebo_env"), "urdf", "panda_and_panda_grippers_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro"]
+ ),
+ " ",
+ "use_gazebo_classic:=true",
+ # PathJoinSubstitution([FindExecutable(name="xacro")]),
+ # " ",
+ # PathJoinSubstitution(
+ # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
+ # ),
+ ]
+ )
+ robot_description = {"robot_description": robot_description_content}
+
+ node_robot_state_publisher = Node(
+ package="robot_state_publisher",
+ executable="robot_state_publisher",
+ output="screen",
+ parameters=[robot_description],
+ )
+
+ spawn_entity = Node(
+ package="gazebo_ros",
+ executable="spawn_entity.py",
+ arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
+ output="screen",
+ )
+
+ joint_state_publisher_node = Node(
+ package="gazebo_env",
+ executable="panda_and_panda_grippers_joint_state_publisher_node.py",
+ output="screen"
+ )
+
+ nodes = [
+ gazebo_server,
+ gazebo_client,
+ node_robot_state_publisher,
+ spawn_entity,
+ joint_state_publisher_node,
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "ur5_left_camera_link",
+ "ur5_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "ur5_right_camera_link",
+ "ur5_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_left_camera_link",
+ "panda_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_right_camera_link",
+ "panda_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_right_camera_link",
+ "panda_no_gripper_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_left_camera_link",
+ "panda_no_gripper_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ )
+ ]
+
+ return LaunchDescription(declared_arguments + nodes)
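As with the other environments, this new wrist variant is presumably launched (after colcon build and sourcing the install space) with `ros2 launch gazebo_env panda_and_panda_grippers_multi_gazebo_early_inpaint_classic_wrist_real.launch.py`; it differs from the non-wrist variant above mainly in the world file (no_shadow_wrist.world) and the wrist scene xacro.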
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_gazebo_early_inpaint_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_to_ur5_gripper_multi_gazebo_early_inpaint_classic_real.launch.py
similarity index 70%
rename from mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_gazebo_early_inpaint_classic_real.launch.py
rename to mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_to_ur5_gripper_multi_gazebo_early_inpaint_classic_real.launch.py
index 836fc20..fc7f6f5 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_gazebo_early_inpaint_classic_real.launch.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_to_ur5_gripper_multi_gazebo_early_inpaint_classic_real.launch.py
@@ -52,9 +52,6 @@ def generate_launch_description():
declared_arguments.append(declare_world_cmd)
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
gazebo_server = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
launch_arguments={'world': world}.items())
@@ -77,7 +74,7 @@ def generate_launch_description():
PathJoinSubstitution([FindExecutable(name="xacro")]),
" ",
PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "panda_and_panda_grippers_camera_scene_real.urdf.xacro"]
+ [FindPackageShare("gazebo_env"), "urdf", "panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_real.urdf.xacro"]
),
" ",
"use_gazebo_classic:=true",
@@ -104,30 +101,12 @@ def generate_launch_description():
output="screen",
)
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
joint_state_publisher_node = Node(
package="gazebo_env",
executable="panda_and_panda_grippers_joint_state_publisher_node.py",
output="screen"
)
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
nodes = [
gazebo_server,
gazebo_client,
@@ -144,8 +123,8 @@ def generate_launch_description():
"-1.571",
"0.0", # .33
"-1.571",
- "ur5_camera_link",
- "ur5_real_camera_link",
+ "ur5_left_camera_link",
+ "ur5_left_real_camera_link",
# "0.35", ##original .50
# "-0.15", # -.05
# "0.0", # changed to .55 from .45
@@ -166,8 +145,8 @@ def generate_launch_description():
"-1.571",
"0.0", # .33
"-1.571",
- "panda_camera_link",
- "panda_real_camera_link",
+ "ur5_right_camera_link",
+ "ur5_right_real_camera_link",
# "0.35", ##original .50
# "-0.15", # -.05
# "0.0", # changed to .55 from .45
@@ -188,8 +167,8 @@ def generate_launch_description():
"-1.571",
"0.0", # .33
"-1.571",
- "panda_no_gripper_camera_link",
- "panda_no_gripper_real_camera_link",
+ "panda_left_camera_link",
+ "panda_left_real_camera_link",
# "0.35", ##original .50
# "-0.15", # -.05
# "0.0", # changed to .55 from .45
@@ -200,9 +179,72 @@ def generate_launch_description():
# "camera_link",
],
),
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_right_camera_link",
+ "panda_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_right_camera_link",
+ "panda_no_gripper_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_left_camera_link",
+ "panda_no_gripper_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ )
]
return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_to_ur5_gripper_multi_gazebo_early_inpaint_classic_wrist_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_to_ur5_gripper_multi_gazebo_early_inpaint_classic_wrist_real.launch.py
new file mode 100644
index 0000000..8ff5511
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_and_panda_grippers_to_ur5_gripper_multi_gazebo_early_inpaint_classic_wrist_real.launch.py
@@ -0,0 +1,250 @@
+# Copyright 2021 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.conditions import IfCondition
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
+from launch_ros.actions import Node
+from launch_ros.substitutions import FindPackageShare
+import os
+
+def generate_launch_description():
+
+ # Set the path to the Gazebo ROS package
+ pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros')
+
+ # Set the path to this package.
+ pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env')
+
+ # Set the path to the world file
+ world_file_name = 'no_shadow_wrist_dark_ur5.world'
+ world_path = os.path.join(pkg_share, 'worlds', world_file_name)
+
+ world = LaunchConfiguration('world')
+
+ declare_world_cmd = DeclareLaunchArgument(
+ name='world',
+ default_value=world_path,
+ description='Full path to the world model file to load')
+
+ # Declare arguments
+ declared_arguments = []
+ declared_arguments.append(
+ DeclareLaunchArgument(
+ "gui",
+ default_value="false",
+ description="Start RViz2 automatically with this launch file.",
+ )
+ )
+
+ declared_arguments.append(declare_world_cmd)
+
+ gazebo_server = IncludeLaunchDescription(
+ PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
+ launch_arguments={'world': world}.items())
+
+ #gazebo_server = IncludeLaunchDescription(
+ # PythonLaunchDescriptionSource(
+ # [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])],
+ # launch_arguments={"world": world}.items(),
+ # )
+ #)
+ gazebo_client = IncludeLaunchDescription(
+ PythonLaunchDescriptionSource(
+ [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
+ )
+ )
+
+ # Get URDF via xacro
+ robot_description_content = Command(
+ [
+ PathJoinSubstitution([FindExecutable(name="xacro")]),
+ " ",
+ PathJoinSubstitution(
+ [FindPackageShare("gazebo_env"), "urdf", "panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro"]
+ ),
+ " ",
+ "use_gazebo_classic:=true",
+ # PathJoinSubstitution([FindExecutable(name="xacro")]),
+ # " ",
+ # PathJoinSubstitution(
+ # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
+ # ),
+ ]
+ )
+ robot_description = {"robot_description": robot_description_content}
+
+ node_robot_state_publisher = Node(
+ package="robot_state_publisher",
+ executable="robot_state_publisher",
+ output="screen",
+ parameters=[robot_description],
+ )
+
+ spawn_entity = Node(
+ package="gazebo_ros",
+ executable="spawn_entity.py",
+ arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
+ output="screen",
+ )
+
+ joint_state_publisher_node = Node(
+ package="gazebo_env",
+ executable="panda_and_panda_grippers_joint_state_publisher_node.py",
+ output="screen"
+ )
+
+ nodes = [
+ gazebo_server,
+ gazebo_client,
+ node_robot_state_publisher,
+ spawn_entity,
+ joint_state_publisher_node,
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "ur5_left_camera_link",
+ "ur5_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "ur5_right_camera_link",
+ "ur5_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_left_camera_link",
+ "panda_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_right_camera_link",
+ "panda_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_right_camera_link",
+ "panda_no_gripper_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_left_camera_link",
+ "panda_no_gripper_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ )
+ ]
+
+ return LaunchDescription(declared_arguments + nodes)
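This file and the wrist launch file above are near-duplicates, differing only in the world file (no_shadow_wrist_dark_ur5.world vs. no_shadow_wrist.world) and the scene xacro. A hedged sketch of how one parameterized launch file could cover both; the argument names are hypothetical and not in the repo:
```
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration

# Hypothetical arguments; neither exists in the repo today.
scene_arg = DeclareLaunchArgument(
    "scene_xacro",
    default_value="panda_and_panda_grippers_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro",
    description="Scene xacro under gazebo_env/urdf",
)
world_arg = DeclareLaunchArgument(
    "world_file",
    default_value="no_shadow_wrist.world",
    description="World file under gazebo_env/worlds",
)
# generate_launch_description() would then substitute these wherever the
# scene and world names are currently hard-coded.
scene = LaunchConfiguration("scene_xacro")
world = LaunchConfiguration("world_file")
```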
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic.launch.py
deleted file mode 100644
index 3df21fd..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic.launch.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "panda_arm_hand_camera_scene.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_panda_joint_state_publisher_node.py",
- output="screen"
- )
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_real.launch.py
deleted file mode 100644
index 42dd311..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_real.launch.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import (
- Command,
- FindExecutable,
- PathJoinSubstitution,
- LaunchConfiguration,
-)
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [
- PathJoinSubstitution(
- [FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"]
- )
- ]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [
- PathJoinSubstitution(
- [FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"]
- )
- ]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [
- FindPackageShare("gazebo_env"),
- "urdf",
- "panda_arm_hand_camera_scene_real.urdf.xacro",
- ]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "joint_state_broadcaster",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_panda_joint_state_publisher_node.py",
- output="screen",
- )
-
- image_sub_node = Node(
- package="gazebo_env", executable="gazebo_image_test.py", output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "forward_position_controller",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "camera_link",
- "real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- # image_sub_node
- # joint_state_broadcaster_spawner,
- # robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite.launch.py
deleted file mode 100644
index 5b11374..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite.launch.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import (
- Command,
- FindExecutable,
- PathJoinSubstitution,
- LaunchConfiguration,
-)
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [
- PathJoinSubstitution(
- [FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"]
- )
- ]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [
- PathJoinSubstitution(
- [FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"]
- )
- ]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [
- FindPackageShare("gazebo_env"),
- "urdf",
- "panda_arm_hand_camera_scene_robosuite.urdf.xacro",
- ]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "joint_state_broadcaster",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_panda_joint_state_publisher_node.py",
- output="screen",
- )
-
- image_sub_node = Node(
- package="gazebo_env", executable="gazebo_image_test.py", output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "forward_position_controller",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "camera_link",
- "real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- # image_sub_node
- # joint_state_broadcaster_spawner,
- # robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py
index c38a7a6..949ecbd 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py
@@ -141,7 +141,7 @@ def generate_launch_description():
nodes = [
gazebo_server,
- #gazebo_client,
+ gazebo_client,
node_robot_state_publisher,
spawn_entity,
joint_state_publisher_node,
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading_no_gripper.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading_no_gripper.launch.py
deleted file mode 100644
index 6ccabc3..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading_no_gripper.launch.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import (
- Command,
- FindExecutable,
- PathJoinSubstitution,
- LaunchConfiguration,
-)
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-import os
-
-def generate_launch_description():
- # Set the path to the Gazebo ROS package
- pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros')
-
- # Set the path to this package.
- pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env')
-
- # Set the path to the world file
- world_file_name = 'no_shadow_sim.world'
- world_path = os.path.join(pkg_share, 'worlds', world_file_name)
-
- world = LaunchConfiguration('world')
-
- declare_world_cmd = DeclareLaunchArgument(
- name='world',
- default_value=world_path,
- description='Full path to the world model file to load')
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- declared_arguments.append(declare_world_cmd)
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
- launch_arguments={'world': world}.items())
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [
- PathJoinSubstitution(
- [FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"]
- )
- ]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [
- FindPackageShare("gazebo_env"),
- "urdf",
- "panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading_no_gripper.urdf.xacro",
- ]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "joint_state_broadcaster",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_panda_joint_state_publisher_node.py",
- output="screen",
- )
-
- image_sub_node = Node(
- package="gazebo_env", executable="gazebo_image_test.py", output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "forward_position_controller",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "camera_link",
- "real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- # image_sub_node
- # joint_state_broadcaster_spawner,
- # robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_ur5e_gripper_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_ur5e_gripper_gazebo_classic_real.launch.py
deleted file mode 100644
index e241e22..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/panda_ur5e_gripper_gazebo_classic_real.launch.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import (
- Command,
- FindExecutable,
- PathJoinSubstitution,
- LaunchConfiguration,
-)
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [
- PathJoinSubstitution(
- [FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"]
- )
- ]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [
- PathJoinSubstitution(
- [FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"]
- )
- ]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [
- FindPackageShare("gazebo_env"),
- "urdf",
- "panda_arm_hand_ur5e_gripper_camera_scene_real.urdf.xacro",
- ]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "joint_state_broadcaster",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_real_panda_ur5e_gripper_joint_state_publisher_node.py",
- output="screen",
- )
-
- image_sub_node = Node(
- package="gazebo_env", executable="gazebo_image_test.py", output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=[
- "forward_position_controller",
- "--controller-manager",
- "/controller_manager",
- ],
- )
-
- nodes = [
- gazebo_server,
- #gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "camera_link",
- "real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- # image_sub_node
- # joint_state_broadcaster_spawner,
- # robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_gazebo_classic_real.launch.py
deleted file mode 100644
index cbb49d9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_gazebo_classic_real.launch.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-import os
-
-def generate_launch_description():
-
- # Set the path to the Gazebo ROS package
- pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros')
-
- # Set the path to this package.
- pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env')
-
- # Set the path to the world file
- world_file_name = 'no_shadow.world'
- world_path = os.path.join(pkg_share, 'worlds', world_file_name)
-
- world = LaunchConfiguration('world')
-
- declare_world_cmd = DeclareLaunchArgument(
- name='world',
- default_value=world_path,
- description='Full path to the world model file to load')
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- declared_arguments.append(declare_world_cmd)
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
- launch_arguments={'world': world}.items())
-
- #gazebo_server = IncludeLaunchDescription(
- # PythonLaunchDescriptionSource(
- # [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])],
- # launch_arguments={"world": world}.items(),
- # )
- #)
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5_and_panda_gripper_camera_scene_real.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_ur5_and_panda_gripper_joint_state_publisher_node.py",
- output="screen"
- )
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "ur5_camera_link",
- "ur5_real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "panda_camera_link",
- "panda_real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_multi_cam_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_multi_cam_gazebo_early_inpaint_classic_real.launch.py
similarity index 82%
rename from mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_multi_cam_gazebo_classic_real.launch.py
rename to mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_multi_cam_gazebo_early_inpaint_classic_real.launch.py
index 5dc189e..0122de7 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_multi_cam_gazebo_classic_real.launch.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_gripper_multi_cam_gazebo_early_inpaint_classic_real.launch.py
@@ -52,9 +52,6 @@ def generate_launch_description():
declared_arguments.append(declare_world_cmd)
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
gazebo_server = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
launch_arguments={'world': world}.items())
@@ -77,7 +74,7 @@ def generate_launch_description():
PathJoinSubstitution([FindExecutable(name="xacro")]),
" ",
PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5_and_panda_gripper_multi_camera_scene_real.urdf.xacro"]
+ [FindPackageShare("gazebo_env"), "urdf", "ur5_and_panda_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro"]
),
" ",
"use_gazebo_classic:=true",
@@ -104,30 +101,12 @@ def generate_launch_description():
output="screen",
)
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
joint_state_publisher_node = Node(
package="gazebo_env",
executable="full_ur5_and_panda_gripper_joint_state_publisher_node.py",
output="screen"
)
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
nodes = [
gazebo_server,
gazebo_client,
@@ -222,6 +201,50 @@ def generate_launch_description():
# "camera_link",
],
),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "ur5_no_gripper_right_camera_link",
+ "ur5_no_gripper_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "ur5_no_gripper_left_camera_link",
+ "ur5_no_gripper_left_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ )
#image_sub_node
#joint_state_broadcaster_spawner,
#robot_controller_spawner,
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_gazebo_classic_real_toy_pick_n_place.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_gazebo_classic_real_toy_pick_n_place.launch.py
deleted file mode 100644
index 2548c52..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_gazebo_classic_real_toy_pick_n_place.launch.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-import os
-
-def generate_launch_description():
-
- # Set the path to the Gazebo ROS package
- pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros')
-
- # Set the path to this package.
- pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env')
-
- # Set the path to the world file
- world_file_name = 'no_shadow.world'
- world_path = os.path.join(pkg_share, 'worlds', world_file_name)
-
- world = LaunchConfiguration('world')
-
- declare_world_cmd = DeclareLaunchArgument(
- name='world',
- default_value=world_path,
- description='Full path to the world model file to load')
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- declared_arguments.append(declare_world_cmd)
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
- launch_arguments={'world': world}.items())
-
- #gazebo_server = IncludeLaunchDescription(
- # PythonLaunchDescriptionSource(
- # [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])],
- # launch_arguments={"world": world}.items(),
- # )
- #)
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5_and_panda_no_gripper_camera_scene_real_toy_pick_n_place.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_ur5_and_panda_no_gripper_joint_state_publisher_node.py",
- output="screen"
- )
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "ur5_camera_link",
- "ur5_real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "panda_camera_link",
- "panda_real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_multi_cam_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_multi_cam_gazebo_early_inpaint_classic_real.launch.py
similarity index 82%
rename from mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_multi_cam_gazebo_classic_real.launch.py
rename to mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_multi_cam_gazebo_early_inpaint_classic_real.launch.py
index 5797635..6cca114 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_multi_cam_gazebo_classic_real.launch.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_and_panda_no_gripper_multi_cam_gazebo_early_inpaint_classic_real.launch.py
@@ -52,9 +52,6 @@ def generate_launch_description():
declared_arguments.append(declare_world_cmd)
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
gazebo_server = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')),
launch_arguments={'world': world}.items())
@@ -77,7 +74,7 @@ def generate_launch_description():
PathJoinSubstitution([FindExecutable(name="xacro")]),
" ",
PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5_and_panda_no_gripper_multi_camera_scene_real.urdf.xacro"]
+ [FindPackageShare("gazebo_env"), "urdf", "ur5_and_panda_no_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro"]
),
" ",
"use_gazebo_classic:=true",
@@ -104,30 +101,12 @@ def generate_launch_description():
output="screen",
)
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
joint_state_publisher_node = Node(
package="gazebo_env",
executable="full_ur5_and_panda_no_gripper_joint_state_publisher_node.py",
output="screen"
)
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
+
nodes = [
gazebo_server,
gazebo_client,
@@ -222,6 +201,50 @@ def generate_launch_description():
# "camera_link",
],
),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_right_camera_link",
+ "panda_no_gripper_right_real_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ ),
+ Node(
+ package="tf2_ros",
+ executable="static_transform_publisher",
+ arguments=[
+ "0.0",
+ "0.0",
+ "0.0", # 0.68
+ "-1.571",
+ "0.0", # .33
+ "-1.571",
+ "panda_no_gripper_left_camera_link",
+ "panda_no_gripper_left_camera_link",
+ # "0.35", ##original .50
+ # "-0.15", # -.05
+ # "0.0", # changed to .55 from .45
+ # "0",
+ # "0", #original 0
+ # "0.0",#1.57
+ # "base_link",
+ # "camera_link",
+ ],
+ )
#image_sub_node
#joint_state_broadcaster_spawner,
#robot_controller_spawner,
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_gazebo_classic_real.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_gazebo_classic_real.launch.py
deleted file mode 100644
index 9a27e1a..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5_gazebo_classic_real.launch.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5_with_gripper_camera_scene_real.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_ur5e_joint_state_publisher_node.py",
- output="screen"
- )
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- arguments=[
- "0.0",
- "0.0",
- "0.0", # 0.68
- "-1.571",
- "0.0", # .33
- "-1.571",
- "camera_link",
- "real_camera_link",
- # "0.35", ##original .50
- # "-0.15", # -.05
- # "0.0", # changed to .55 from .45
- # "0",
- # "0", #original 0
- # "0.0",#1.57
- # "base_link",
- # "camera_link",
- ],
- ),
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic.launch.py
deleted file mode 100644
index 332254e..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic.launch.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5e_nvidia_with_gripper_camera_scene.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_ur5e_joint_state_publisher_node.py",
- output="screen"
- )
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic_pointcloud_debug.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic_pointcloud_debug.launch.py
deleted file mode 100644
index 075a64f..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic_pointcloud_debug.launch.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5e_gazebo_pointcloud_debug.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic_robosuite.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic_robosuite.launch.py
deleted file mode 100644
index 4bf7f70..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_gazebo_classic_robosuite.launch.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "urdf", "ur5e_nvidia_with_gripper_camera_scene_robosuite.urdf.xacro"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
- )
-
- joint_state_publisher_node = Node(
- package="gazebo_env",
- executable="full_ur5e_joint_state_publisher_node.py",
- output="screen"
- )
-
- image_sub_node = Node(
- package="gazebo_env",
- executable="gazebo_image_test.py",
- output="screen"
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_publisher_node,
- #image_sub_node
- #joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_issac_sim.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_issac_sim.launch.py
deleted file mode 100644
index 5c1721b..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_issac_sim.launch.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2021 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
-from launch.conditions import IfCondition
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
-
- # Declare arguments
- declared_arguments = []
- declared_arguments.append(
- DeclareLaunchArgument(
- "gui",
- default_value="false",
- description="Start RViz2 automatically with this launch file.",
- )
- )
-
- # Initialize Arguments
- gui = LaunchConfiguration("gui")
-
- gazebo_server = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"])]
- )
- )
- gazebo_client = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- [PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gzclient.launch.py"])]
- )
- )
-
- # Get URDF via xacro
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- PathJoinSubstitution(
- [FindPackageShare("gazebo_env"), "ur5e_description","robots", "ur5e_with_gripper.urdf"]
- ),
- " ",
- "use_gazebo_classic:=true",
- # PathJoinSubstitution([FindExecutable(name="xacro")]),
- # " ",
- # PathJoinSubstitution(
- # [FindPackageShare("gazebo_env"), "urdf", "d435.urdf"]
- # ),
- ]
- )
- robot_description = {"robot_description": robot_description_content}
-
- node_robot_state_publisher = Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- output="screen",
- parameters=[robot_description],
- )
-
- spawn_entity = Node(
- package="gazebo_ros",
- executable="spawn_entity.py",
- arguments=["-topic", "robot_description", "-entity", "urbot_system_position"],
- output="screen",
- )
-
- joint_state_broadcaster_spawner = Node(
- package="joint_state_publisher",
- executable="joint_state_publisher",
- )
-
- robot_controller_spawner = Node(
- package="controller_manager",
- executable="spawner",
- arguments=["forward_position_controller", "--controller-manager", "/controller_manager"],
- )
-
- nodes = [
- gazebo_server,
- gazebo_client,
- node_robot_state_publisher,
- spawn_entity,
- joint_state_broadcaster_spawner,
- #robot_controller_spawner,
- ]
-
- return LaunchDescription(declared_arguments + nodes)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_joint_pointcloud.launch.py b/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_joint_pointcloud.launch.py
deleted file mode 100644
index 765547e..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/bringup/launch/ur5e_joint_pointcloud.launch.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-import launch
-import launch_ros.actions
-import pathlib
-
-
-def generate_launch_description():
- return launch.LaunchDescription(
- [
- launch_ros.actions.Node(
- package="gazebo_env",
- executable="ur5e_point_cloud_publisher.py",
- name="ur5e_point_cloud_publisher",
- output="both",
- )
- ]
- )
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/crossing_the_body/body_the_cross.py b/mirage/mirage/ros_ws/src/gazebo_env/crossing_the_body/body_the_cross.py
deleted file mode 100644
index fc21f0f..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/crossing_the_body/body_the_cross.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import cv2
-import numpy as np
-
-# Load the world image and the mask (replace with your image and mask paths)
-world_image = cv2.imread('inpainted_result_image.jpg')
-franka_mask = cv2.imread('gazebo_mask.jpg',0)
-_, franka_mask = cv2.threshold(franka_mask, 128, 255, cv2.THRESH_BINARY)
-inverted_mask = cv2.bitwise_not(franka_mask)
-result_image = cv2.bitwise_and(world_image, world_image, mask=inverted_mask)
-mask = cv2.imread('gazebo_mask.jpg', 0) # Load the mask in grayscale
-#_, mask = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY)
-# Invert the mask
-#inverted_mask = cv2.bitwise_not(mask)
-
-# Create a new image that includes all parts of the world image except for the mask
-#result_image = cv2.bitwise_and(world_image, world_image, mask=inverted_mask)
-ur5_image = cv2.imread('gazebo_robot_only.jpg')
-merge = ur5_image + result_image
-# Save or display the result
-cv2.imwrite('result_image.jpg', merge) # Optionally, save the result image
-cv2.imshow('Result Image', merge) # Optionally, display the result image
-cv2.waitKey(0) # Wait for a key press
-cv2.destroyAllWindows() # Close the window
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/crossing_the_body/nearest_neighbor.py b/mirage/mirage/ros_ws/src/gazebo_env/crossing_the_body/nearest_neighbor.py
deleted file mode 100644
index b72f80b..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/crossing_the_body/nearest_neighbor.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import cv2
-import numpy as np
-
-# Load the result image with the masked region
-img = cv2.imread('franka.jpg')
-
-# Load the mask (same mask used earlier)
-mask = cv2.imread('seg.jpg', 0)
-_, mask = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY)
-inverted_mask = cv2.bitwise_not(mask)
-result_image = cv2.bitwise_and(img, img, mask=inverted_mask)
-#output = cv2.inpaint(img,mask,100,cv2.INPAINT_NS)
-# Save or display the inpainted result
-cv2.imwrite('test.jpg', result_image) # Optionally, save the inpainted result image
-cv2.imshow('Inpainted Result', result_image) # Optionally, display the inpainted result
-cv2.waitKey(0) # Wait for a key press
-cv2.destroyAllWindows() # Close the window
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/box.sdf b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/box.sdf
deleted file mode 100644
index 0f4d1d3..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/box.sdf
+++ /dev/null
@@ -1,35 +0,0 @@
[SDF markup stripped during extraction; the deleted box.sdf defined a static box model: pose "0 0 0.5 0 0 0", mass 1.0, diagonal inertia 0.083, collision and visual box geometry of size "1 1 1"]
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro
index 534eb53..d248a23 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro
@@ -108,8 +108,8 @@
0.7853981634
R8G8B8
- 512
- 512
+ 256
+ 256
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/robosuite_cam.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/d435_robosuite.urdf.xacro
similarity index 96%
rename from mirage/mirage/ros_ws/src/gazebo_env/description/urdf/robosuite_cam.urdf.xacro
rename to mirage/mirage/ros_ws/src/gazebo_env/description/urdf/d435_robosuite.urdf.xacro
index d248a23..d6622bc 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/robosuite_cam.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/d435_robosuite.urdf.xacro
@@ -4,7 +4,7 @@
@@ -108,10 +108,8 @@
0.7853981634
R8G8B8
- 256
- 256
+ 84
+ 84
0.05
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper.urdf.xacro
deleted file mode 100644
index 34ecd36..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper.urdf.xacro
+++ /dev/null
@@ -1,304 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper_camera_scene.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper_camera_scene.urdf.xacro
deleted file mode 100644
index acb7eab..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper_camera_scene.urdf.xacro
+++ /dev/null
@@ -1,11 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper_solo.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper_solo.urdf.xacro
deleted file mode 100644
index 0d2179f..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/nvidia_with_gripper_solo.urdf.xacro
+++ /dev/null
@@ -1,7 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_camera_scene_early_inpaint_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_camera_scene_early_inpaint_real.urdf.xacro
deleted file mode 100644
index a340900..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_camera_scene_early_inpaint_real.urdf.xacro
+++ /dev/null
@@ -1,36 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_camera_scene_real.urdf.xacro
deleted file mode 100644
index 40120b2..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_camera_scene_real.urdf.xacro
+++ /dev/null
@@ -1,33 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_early_inpaint_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_early_inpaint_real.urdf.xacro
new file mode 100644
index 0000000..1e7aae1
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_early_inpaint_real.urdf.xacro
@@ -0,0 +1,52 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro
new file mode 100644
index 0000000..7e0d464
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro
@@ -0,0 +1,46 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_real.urdf.xacro
similarity index 52%
rename from mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_real.urdf.xacro
rename to mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_real.urdf.xacro
index cb3ea52..e9f9da1 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_multi_camera_scene_real.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_real.urdf.xacro
@@ -10,10 +10,10 @@
@@ -23,16 +23,30 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro
new file mode 100644
index 0000000..8661f6e
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_and_panda_grippers_to_ur5_gripper_multi_camera_scene_early_inpaint_wrist_real.urdf.xacro
@@ -0,0 +1,45 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene.urdf.xacro
deleted file mode 100644
index ebb5c23..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene.urdf.xacro
+++ /dev/null
@@ -1,19 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_real.urdf.xacro
deleted file mode 100644
index 4ed96d2..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_real.urdf.xacro
+++ /dev/null
@@ -1,30 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite.urdf.xacro
deleted file mode 100644
index e455511..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite.urdf.xacro
+++ /dev/null
@@ -1,28 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro
index 68d844e..e529bb9 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro
@@ -4,13 +4,13 @@
@@ -18,7 +18,7 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading.urdf.xacro
index e455511..ae9c438 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading.urdf.xacro
@@ -9,8 +9,8 @@
@@ -18,7 +18,7 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading_no_gripper.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading_no_gripper.urdf.xacro
deleted file mode 100644
index c2bff7f..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_lift_square_stack_three_threading_no_gripper.urdf.xacro
+++ /dev/null
@@ -1,27 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_three_piece_assembly.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_three_piece_assembly.urdf.xacro
index 3dd3272..b3159eb 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_three_piece_assembly.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_three_piece_assembly.urdf.xacro
@@ -9,15 +9,15 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only.urdf.xacro
deleted file mode 100644
index d80c393..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only.urdf.xacro
+++ /dev/null
@@ -1,11 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik.urdf b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik.urdf
deleted file mode 100644
index aa36b4d..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik.urdf
+++ /dev/null
@@ -1,301 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik_robosuite.urdf b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik_robosuite.urdf
deleted file mode 100644
index 942c095..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik_robosuite.urdf
+++ /dev/null
@@ -1,316 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_ur5e_gripper_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_ur5e_gripper_camera_scene_real.urdf.xacro
deleted file mode 100644
index 5b13a32..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_ur5e_gripper_camera_scene_real.urdf.xacro
+++ /dev/null
@@ -1,33 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_ur5e_gripper_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_ur5e_gripper_scene_real.urdf.xacro
deleted file mode 100644
index 0c10ce9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_ur5e_gripper_scene_real.urdf.xacro
+++ /dev/null
@@ -1,13 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_robosuite.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_ur5e_gripper_wrist.xacro
similarity index 59%
rename from mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_robosuite.urdf.xacro
rename to mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_ur5e_gripper_wrist.xacro
index af17921..bf4d531 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_robosuite.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_ur5e_gripper_wrist.xacro
@@ -1,21 +1,21 @@
@@ -23,205 +23,221 @@
@@ -233,10 +249,32 @@
+ Gazebo/Black
@@ -260,6 +298,12 @@
izx="0.000000" izy="0.000000" izz="0.001171" />
@@ -509,37 +553,23 @@
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_wrist.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_wrist.xacro
new file mode 100644
index 0000000..d022fd7
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/panda_arm_wrist.xacro
@@ -0,0 +1,233 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam.urdf.xacro
index 9e66898..e7afa15 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam.urdf.xacro
@@ -24,14 +24,6 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_left.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_left.urdf.xacro
index a8f8a23..21b34d0 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_left.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_left.urdf.xacro
@@ -24,14 +24,6 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_left_wrist.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_left_wrist.urdf.xacro
new file mode 100644
index 0000000..75be254
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_left_wrist.urdf.xacro
@@ -0,0 +1,129 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.436612444
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ panda_left_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper.urdf.xacro
index 660798f..8d20c88 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper.urdf.xacro
@@ -24,14 +24,6 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper_left.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper_left.urdf.xacro
new file mode 100644
index 0000000..69394e7
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper_left.urdf.xacro
@@ -0,0 +1,122 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.768165077
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ panda_no_gripper_left_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper_right.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper_right.urdf.xacro
new file mode 100644
index 0000000..526878e
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_no_gripper_right.urdf.xacro
@@ -0,0 +1,122 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.768165077
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ panda_no_gripper_right_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_right.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_right.urdf.xacro
index 005382e..7958b64 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_right.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_right.urdf.xacro
@@ -24,14 +24,6 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_right_wrist.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_right_wrist.urdf.xacro
new file mode 100644
index 0000000..d5150c7
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_panda_cam_right_wrist.urdf.xacro
@@ -0,0 +1,129 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.768165077
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ panda_right_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam.urdf.xacro
index 843700b..fe5c980 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam.urdf.xacro
@@ -24,14 +24,6 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_left_wrist.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_left_wrist.urdf.xacro
new file mode 100644
index 0000000..3e30825
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_left_wrist.urdf.xacro
@@ -0,0 +1,130 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.436612444
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ ur5_left_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_no_gripper_left.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_no_gripper_left.urdf.xacro
new file mode 100644
index 0000000..81bef2d
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_no_gripper_left.urdf.xacro
@@ -0,0 +1,122 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.769165872
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ ur5_no_gripper_left_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_no_gripper_right.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_no_gripper_right.urdf.xacro
new file mode 100644
index 0000000..357daba
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_no_gripper_right.urdf.xacro
@@ -0,0 +1,122 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.769165872
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ ur5_no_gripper_right_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_right.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_right.urdf.xacro
index df0a34a..1070db2 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_right.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_right.urdf.xacro
@@ -24,14 +24,6 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_right_wrist.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_right_wrist.urdf.xacro
new file mode 100644
index 0000000..c835cd8
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/real_ur5_cam_right_wrist.urdf.xacro
@@ -0,0 +1,130 @@
+ Gazebo/TurquoiseGlowOutline
+ 0 0 0 0 0 0
+ false
+ 30
+ 1.768165077
+ R8G8B8
+ 1280
+ 720
+ 0.005
+ 8.0
+ ur5_right_camera_color_optical_frame
+ 0.005
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_camera_scene_real.urdf.xacro
deleted file mode 100644
index e0df844..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_camera_scene_real.urdf.xacro
+++ /dev/null
@@ -1,36 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_multi_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro
similarity index 66%
rename from mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_multi_camera_scene_real.urdf.xacro
rename to mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro
index bca6ba5..112b233 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_multi_camera_scene_real.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro
@@ -1,5 +1,6 @@
@@ -30,8 +31,23 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_camera_scene_real_toy_pick_n_place.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_camera_scene_real_toy_pick_n_place.urdf.xacro
deleted file mode 100644
index d78adfd..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_camera_scene_real_toy_pick_n_place.urdf.xacro
+++ /dev/null
@@ -1,33 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_multi_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro
similarity index 62%
rename from mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_multi_camera_scene_real.urdf.xacro
rename to mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro
index 8f9b5d9..928f0aa 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_multi_camera_scene_real.urdf.xacro
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_and_panda_no_gripper_multi_camera_early_inpaint_scene_real.urdf.xacro
@@ -27,8 +27,23 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_and_panda_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_and_panda_camera_scene_real.urdf.xacro
deleted file mode 100644
index 8a2cdd8..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_and_panda_camera_scene_real.urdf.xacro
+++ /dev/null
@@ -1,32 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_camera_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_camera_scene_real.urdf.xacro
deleted file mode 100644
index 51dffa4..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_camera_scene_real.urdf.xacro
+++ /dev/null
@@ -1,17 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_scene_real.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_scene_real.urdf.xacro
deleted file mode 100644
index b86e6b7..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_gripper_scene_real.urdf.xacro
+++ /dev/null
@@ -1,7 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_no_gripper.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_no_gripper.urdf.xacro
new file mode 100644
index 0000000..ff5c947
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5_with_no_gripper.urdf.xacro
@@ -0,0 +1,272 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e.urdf.xacro
deleted file mode 100644
index 32abbf9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e.urdf.xacro
+++ /dev/null
@@ -1,447 +0,0 @@
- gazebo_ros2_control/GazeboSystem
- {-2*pi}
- {2*pi}
- 0.0
- {-2*pi}
- {2*pi}
- 0
- {-pi}
- {pi}
- 0.0
- {-2*pi}
- {2*pi}
- 0
- {-2*pi}
- {2*pi}
- 0
- {-2*pi}
- {2*pi}
- 0
- Gazebo/Orange
- 0.2
- 0.2
- Gazebo/Yellow
- 0.2
- 0.2
- Gazebo/Orange
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_analytical.urdf b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_analytical.urdf
deleted file mode 100644
index 1dd43d0..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_analytical.urdf
+++ /dev/null
@@ -1,478 +0,0 @@
- gazebo_ros2_control/GazeboSystem
- {-2*pi}
- {2*pi}
- 0.0
- {-2*pi}
- {2*pi}
- 0
- {-pi}
- {pi}
- 0.0
- {-2*pi}
- {2*pi}
- 0
- {-2*pi}
- {2*pi}
- 0
- {-2*pi}
- {2*pi}
- 0
- /home/kushtimusprime/cross_embodiment_test_ws/install/gazebo_env/share/gazebo_env/config/urbot_controllers.yaml
- Gazebo/Orange
- 0.2
- 0.2
- Gazebo/Yellow
- 0.2
- 0.2
- Gazebo/Orange
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_camera_gazebo_scene.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_camera_gazebo_scene.urdf.xacro
deleted file mode 100644
index 5c7680b..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_camera_gazebo_scene.urdf.xacro
+++ /dev/null
@@ -1,30 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo.urdf.xacro
deleted file mode 100644
index 50c76f4..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo.urdf.xacro
+++ /dev/null
@@ -1,512 +0,0 @@
- gazebo_ros2_control/GazeboSystem
- {-2*pi}
- {2*pi}
- 0.0
- {-2*pi}
- {2*pi}
- 0
- {-pi}
- {pi}
- 0.0
- {-2*pi}
- {2*pi}
- 0
- {-2*pi}
- {2*pi}
- 0
- {-2*pi}
- {2*pi}
- 0
- /home/benchturtle/cross_embodiment_ws/src/gazebo_env/bringup/config/urbot_controllers.yaml
- Gazebo/Orange
- 0.2
- 0.2
- Gazebo/Yellow
- 0.2
- 0.2
- Gazebo/Orange
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo_pointcloud_debug.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo_pointcloud_debug.urdf.xacro
deleted file mode 100644
index 37a3292..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo_pointcloud_debug.urdf.xacro
+++ /dev/null
@@ -1,469 +0,0 @@
- /home/benchturtle/cross_embodiment_ws/src/gazebo_env/bringup/config/urbot_controllers.yaml
- Gazebo/Orange
- 0.2
- 0.2
- Gazebo/Yellow
- 0.2
- 0.2
- Gazebo/Orange
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo_solo.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo_solo.urdf.xacro
deleted file mode 100644
index c51521f..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_gazebo_solo.urdf.xacro
+++ /dev/null
@@ -1,19 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_camera_scene.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_camera_scene.urdf.xacro
deleted file mode 100644
index 6966eb1..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_camera_scene.urdf.xacro
+++ /dev/null
@@ -1,12 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_camera_scene_robosuite.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_camera_scene_robosuite.urdf.xacro
deleted file mode 100644
index ede9dac..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_camera_scene_robosuite.urdf.xacro
+++ /dev/null
@@ -1,13 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo.urdf.xacro
deleted file mode 100644
index fc99c07..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo.urdf.xacro
+++ /dev/null
@@ -1,7 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik.urdf b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik.urdf
deleted file mode 100644
index fb57ffe..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik.urdf
+++ /dev/null
@@ -1,470 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik_robosuite.urdf b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik_robosuite.urdf
deleted file mode 100644
index 9925bfd..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik_robosuite.urdf
+++ /dev/null
@@ -1,464 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_with_gripper_gazebo_solo.urdf.xacro b/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_with_gripper_gazebo_solo.urdf.xacro
deleted file mode 100644
index 9735c87..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/description/urdf/ur5e_with_gripper_gazebo_solo.urdf.xacro
+++ /dev/null
@@ -1,27 +0,0 @@
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/gripper_control_plugin/gripper_control_plugin.cc b/mirage/mirage/ros_ws/src/gazebo_env/gripper_control_plugin/gripper_control_plugin.cc
deleted file mode 100644
index efa27c9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/gripper_control_plugin/gripper_control_plugin.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef _GRIPPERCONTROL_PLUGIN_HH_
-#define _GRIPPERCONTROL_PLUGIN_HH_
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace gazebo
-{
- /// \brief A plugin to control a NoPhysics sensor.
- class GripperControlPlugin : public ModelPlugin
- {
- /// \brief Constructor
- public: GripperControlPlugin() {}
-
- /// \brief The load function is called by Gazebo when the plugin is
- /// inserted into simulation
- /// \param[in] _model A pointer to the model that this plugin is
- /// attached to.
- /// \param[in] _sdf A pointer to the plugin's SDF element.
- public: virtual void Load(physics::ModelPtr _model, sdf::ElementPtr _sdf)
- {
- // Initialize ROS node
- this->node_ = gazebo_ros::Node::Get(_sdf);
- this->model_name_ = _model->GetName();
- const gazebo_ros::QoS &qos = this->node_->get_qos();
- // Just output a message for now
- std::cerr << "KUSHTIMUS PRIME" << "\n";
-    this->robot_subscriber_ = this->node_->create_subscription<std_msgs::msg::Float64MultiArray>(
- "joint_commands",
- qos.get_subscription_qos("joint_commands", rclcpp::QoS(1)),
- std::bind(&GripperControlPlugin::jointCommandMsg, this, std::placeholders::_1));
- }
-
- void jointCommandMsg(const std_msgs::msg::Float64MultiArray::SharedPtr msg) {
- physics::WorldPtr world = physics::get_world("default");
- auto model_ptr = world->ModelByName(model_name_);
- auto left_knuckle = model_ptr->GetJoint("robotiq_85_left_knuckle_joint");
- auto right_knuckle = model_ptr->GetJoint("robotiq_85_right_knuckle_joint");
- auto left_inner_knuckle = model_ptr->GetJoint("robotiq_85_left_inner_knuckle_joint");
- auto right_inner_knuckle = model_ptr->GetJoint("robotiq_85_right_inner_knuckle_joint");
- auto left_finger = model_ptr->GetJoint("robotiq_85_left_finger_tip_joint");
- auto right_finger = model_ptr->GetJoint("robotiq_85_right_finger_tip_joint");
- // Needs to coordinate with custon_joint_state_publisher_node.py
- left_knuckle->SetPosition(0,msg->data[0]);
- right_knuckle->SetPosition(0,msg->data[0]);
- left_inner_knuckle->SetPosition(0,msg->data[0]);
- right_inner_knuckle->SetPosition(0,msg->data[0]);
- left_finger->SetPosition(0,-msg->data[0]);
- right_finger->SetPosition(0,-msg->data[0]);
- }
-
-
- private:
- gazebo_ros::Node::SharedPtr node_;
- std::string model_name_;
-      rclcpp::Subscription<std_msgs::msg::Float64MultiArray>::SharedPtr robot_subscriber_;
-
- };
-
- // Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin.
- GZ_REGISTER_MODEL_PLUGIN(GripperControlPlugin)
-}
-#endif
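
For reference, the deleted GripperControlPlugin above was driven entirely over the `joint_commands` topic: it read `data[0]` from a `Float64MultiArray` and mirrored it onto all six Robotiq 85 joints, negating the two finger-tip joints. Below is a minimal rclpy publisher sketch compatible with that interface; the node name and example angle are illustrative, not from the repo.
```python
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64MultiArray


class GripperCommandPublisher(Node):
    """Minimal sketch: publish one knuckle angle on joint_commands."""

    def __init__(self):
        super().__init__('gripper_command_publisher')  # hypothetical node name
        self.pub = self.create_publisher(Float64MultiArray, 'joint_commands', 1)

    def send(self, knuckle_angle: float):
        # The plugin read only data[0] and applied it to every Robotiq 85
        # joint (negated for the two finger-tip joints), so one value suffices.
        msg = Float64MultiArray()
        msg.data = [knuckle_angle]
        self.pub.publish(msg)


def main():
    rclpy.init()
    node = GripperCommandPublisher()
    node.send(0.4)  # illustrative: partially close the gripper
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
```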
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/hardware/include/gazebo_env/urbot.hpp b/mirage/mirage/ros_ws/src/gazebo_env/hardware/include/gazebo_env/urbot.hpp
deleted file mode 100644
index 4f7c942..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/hardware/include/gazebo_env/urbot.hpp
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2023 ros2_control Development Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GAZEBO_ENV__URBOT_HPP_
-#define GAZEBO_ENV__URBOT_HPP_
-
-#include
-#include
-#include
-
-#include "hardware_interface/handle.hpp"
-#include "hardware_interface/hardware_info.hpp"
-#include "hardware_interface/system_interface.hpp"
-#include "hardware_interface/types/hardware_interface_return_values.hpp"
-#include "rclcpp/macros.hpp"
-#include "rclcpp_lifecycle/node_interfaces/lifecycle_node_interface.hpp"
-#include "rclcpp_lifecycle/state.hpp"
-#include "gazebo_env/visibility_control.h"
-
-namespace gazebo_env
-{
-class URBotSystemPositionOnlyHardware : public hardware_interface::SystemInterface
-{
-public:
- RCLCPP_SHARED_PTR_DEFINITIONS(URBotSystemPositionOnlyHardware);
-
- GAZEBO_ENV_PUBLIC
- hardware_interface::CallbackReturn on_init(
- const hardware_interface::HardwareInfo & info) override;
-
- GAZEBO_ENV_PUBLIC
- hardware_interface::CallbackReturn on_configure(
- const rclcpp_lifecycle::State & previous_state) override;
-
- GAZEBO_ENV_PUBLIC
-  std::vector<hardware_interface::StateInterface> export_state_interfaces() override;
-
- GAZEBO_ENV_PUBLIC
-  std::vector<hardware_interface::CommandInterface> export_command_interfaces() override;
-
- GAZEBO_ENV_PUBLIC
- hardware_interface::CallbackReturn on_activate(
- const rclcpp_lifecycle::State & previous_state) override;
-
- GAZEBO_ENV_PUBLIC
- hardware_interface::CallbackReturn on_deactivate(
- const rclcpp_lifecycle::State & previous_state) override;
-
- GAZEBO_ENV_PUBLIC
- hardware_interface::return_type read(
- const rclcpp::Time & time, const rclcpp::Duration & period) override;
-
- GAZEBO_ENV_PUBLIC
- hardware_interface::return_type write(
- const rclcpp::Time & time, const rclcpp::Duration & period) override;
-
-private:
- // Parameters for the URBot simulation
- double hw_start_sec_;
- double hw_stop_sec_;
- double hw_slowdown_;
-
- // Store the command for the simulated robot
-  std::vector<double> hw_commands_;
-  std::vector<double> hw_states_;
-};
-
-} // namespace gazebo_env
-
-#endif // GAZEBO_ENV__URBOT_HPP_
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/hardware/include/gazebo_env/visibility_control.h b/mirage/mirage/ros_ws/src/gazebo_env/hardware/include/gazebo_env/visibility_control.h
deleted file mode 100644
index 760ac67..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/hardware/include/gazebo_env/visibility_control.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2021 ros2_control Development Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/* This header must be included by all rclcpp headers which declare symbols
- * which are defined in the rclcpp library. When not building the rclcpp
- * library, i.e. when using the headers in other package's code, the contents
- * of this header change the visibility of certain symbols which the rclcpp
- * library cannot have, but the consuming code must have inorder to link.
- */
-
-#ifndef GAZEBO_ENV__VISIBILITY_CONTROL_H_
-#define GAZEBO_ENV__VISIBILITY_CONTROL_H_
-
-// This logic was borrowed (then namespaced) from the examples on the gcc wiki:
-// https://gcc.gnu.org/wiki/Visibility
-
-#if defined _WIN32 || defined __CYGWIN__
-#ifdef __GNUC__
-#define GAZEBO_ENV_EXPORT __attribute__((dllexport))
-#define GAZEBO_ENV_IMPORT __attribute__((dllimport))
-#else
-#define GAZEBO_ENV_EXPORT __declspec(dllexport)
-#define GAZEBO_ENV_IMPORT __declspec(dllimport)
-#endif
-#ifdef GAZEBO_ENV_BUILDING_DLL
-#define GAZEBO_ENV_PUBLIC GAZEBO_ENV_EXPORT
-#else
-#define GAZEBO_ENV_PUBLIC GAZEBO_ENV_IMPORT
-#endif
-#define GAZEBO_ENV_PUBLIC_TYPE GAZEBO_ENV_PUBLIC
-#define GAZEBO_ENV_LOCAL
-#else
-#define GAZEBO_ENV_EXPORT __attribute__((visibility("default")))
-#define GAZEBO_ENV_IMPORT
-#if __GNUC__ >= 4
-#define GAZEBO_ENV_PUBLIC __attribute__((visibility("default")))
-#define GAZEBO_ENV_LOCAL __attribute__((visibility("hidden")))
-#else
-#define GAZEBO_ENV_PUBLIC
-#define GAZEBO_ENV_LOCAL
-#endif
-#define GAZEBO_ENV_PUBLIC_TYPE
-#endif
-
-#endif // GAZEBO_ENV__VISIBILITY_CONTROL_H_
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/hardware/urbot.cpp b/mirage/mirage/ros_ws/src/gazebo_env/hardware/urbot.cpp
deleted file mode 100644
index 899a1b2..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/hardware/urbot.cpp
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2023 ros2_control Development Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "gazebo_env/urbot.hpp"
-
-#include
-#include
-#include
-#include
-#include
-
-#include "hardware_interface/types/hardware_interface_type_values.hpp"
-#include "rclcpp/rclcpp.hpp"
-
-namespace gazebo_env
-{
-hardware_interface::CallbackReturn URBotSystemPositionOnlyHardware::on_init(
- const hardware_interface::HardwareInfo & info)
-{
- if (
- hardware_interface::SystemInterface::on_init(info) !=
- hardware_interface::CallbackReturn::SUCCESS)
- {
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- hw_start_sec_ = stod(info_.hardware_parameters["example_param_hw_start_duration_sec"]);
- hw_stop_sec_ = stod(info_.hardware_parameters["example_param_hw_stop_duration_sec"]);
- hw_slowdown_ = stod(info_.hardware_parameters["example_param_hw_slowdown"]);
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-  hw_states_.resize(info_.joints.size(), std::numeric_limits<double>::quiet_NaN());
-  hw_commands_.resize(info_.joints.size(), std::numeric_limits<double>::quiet_NaN());
-
- for (const hardware_interface::ComponentInfo & joint : info_.joints)
- {
- // URBotSystemPositionOnly has exactly one state and command interface on each joint
- if (joint.command_interfaces.size() != 1)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"),
- "Joint '%s' has %zu command interfaces found. 1 expected.", joint.name.c_str(),
- joint.command_interfaces.size());
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- if (joint.command_interfaces[0].name != hardware_interface::HW_IF_POSITION)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"),
- "Joint '%s' have %s command interfaces found. '%s' expected.", joint.name.c_str(),
- joint.command_interfaces[0].name.c_str(), hardware_interface::HW_IF_POSITION);
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- if (joint.state_interfaces.size() != 1)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"),
- "Joint '%s' has %zu state interface. 1 expected.", joint.name.c_str(),
- joint.state_interfaces.size());
- return hardware_interface::CallbackReturn::ERROR;
- }
-
- if (joint.state_interfaces[0].name != hardware_interface::HW_IF_POSITION)
- {
- RCLCPP_FATAL(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"),
- "Joint '%s' have %s state interface. '%s' expected.", joint.name.c_str(),
- joint.state_interfaces[0].name.c_str(), hardware_interface::HW_IF_POSITION);
- return hardware_interface::CallbackReturn::ERROR;
- }
- }
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-hardware_interface::CallbackReturn URBotSystemPositionOnlyHardware::on_configure(
- const rclcpp_lifecycle::State & /*previous_state*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Configuring ...please wait...");
-
- for (int i = 0; i < hw_start_sec_; i++)
- {
- rclcpp::sleep_for(std::chrono::seconds(1));
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "%.1f seconds left...",
- hw_start_sec_ - i);
- }
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- // reset values always when configuring hardware
- for (uint i = 0; i < hw_states_.size(); i++)
- {
- hw_states_[i] = 0;
- hw_commands_[i] = 0;
- }
-
- RCLCPP_INFO(rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Successfully configured!");
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-std::vector<hardware_interface::StateInterface>
-URBotSystemPositionOnlyHardware::export_state_interfaces()
-{
-  std::vector<hardware_interface::StateInterface> state_interfaces;
- for (uint i = 0; i < info_.joints.size(); i++)
- {
- state_interfaces.emplace_back(hardware_interface::StateInterface(
- info_.joints[i].name, hardware_interface::HW_IF_POSITION, &hw_states_[i]));
- }
-
- return state_interfaces;
-}
-
-std::vector<hardware_interface::CommandInterface>
-URBotSystemPositionOnlyHardware::export_command_interfaces()
-{
-  std::vector<hardware_interface::CommandInterface> command_interfaces;
- for (uint i = 0; i < info_.joints.size(); i++)
- {
- command_interfaces.emplace_back(hardware_interface::CommandInterface(
- info_.joints[i].name, hardware_interface::HW_IF_POSITION, &hw_commands_[i]));
- }
-
- return command_interfaces;
-}
-
-hardware_interface::CallbackReturn URBotSystemPositionOnlyHardware::on_activate(
- const rclcpp_lifecycle::State & /*previous_state*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Activating ...please wait...");
-
- for (int i = 0; i < hw_start_sec_; i++)
- {
- rclcpp::sleep_for(std::chrono::seconds(1));
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "%.1f seconds left...",
- hw_start_sec_ - i);
- }
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- // command and state should be equal when starting
- for (uint i = 0; i < hw_states_.size(); i++)
- {
- hw_commands_[i] = hw_states_[i];
- }
-
- RCLCPP_INFO(rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Successfully activated!");
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-hardware_interface::CallbackReturn URBotSystemPositionOnlyHardware::on_deactivate(
- const rclcpp_lifecycle::State & /*previous_state*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Deactivating ...please wait...");
-
- for (int i = 0; i < hw_stop_sec_; i++)
- {
- rclcpp::sleep_for(std::chrono::seconds(1));
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "%.1f seconds left...",
- hw_stop_sec_ - i);
- }
-
- RCLCPP_INFO(rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Successfully deactivated!");
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- return hardware_interface::CallbackReturn::SUCCESS;
-}
-
-hardware_interface::return_type URBotSystemPositionOnlyHardware::read(
- const rclcpp::Time & /*time*/, const rclcpp::Duration & /*period*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Reading...");
-
- for (uint i = 0; i < hw_states_.size(); i++)
- {
- // Simulate URBot's movement
- hw_states_[i] = hw_states_[i] + (hw_commands_[i] - hw_states_[i]) / hw_slowdown_;
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Got state %.5f for joint %d!",
- hw_states_[i], i);
- }
- RCLCPP_INFO(rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Joints successfully read!");
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- return hardware_interface::return_type::OK;
-}
-
-hardware_interface::return_type URBotSystemPositionOnlyHardware::write(
- const rclcpp::Time & /*time*/, const rclcpp::Duration & /*period*/)
-{
- // BEGIN: This part here is for exemplary purposes - Please do not copy to your production code
- RCLCPP_INFO(rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Writing...");
-
- for (uint i = 0; i < hw_commands_.size(); i++)
- {
- // Simulate sending commands to the hardware
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Got command %.5f for joint %d!",
- hw_commands_[i], i);
- }
- RCLCPP_INFO(
- rclcpp::get_logger("URBotSystemPositionOnlyHardware"), "Joints successfully written!");
- // END: This part here is for exemplary purposes - Please do not copy to your production code
-
- return hardware_interface::return_type::OK;
-}
-
-} // namespace gazebo_env
-
-#include "pluginlib/class_list_macros.hpp"
-
-PLUGINLIB_EXPORT_CLASS(
- gazebo_env::URBotSystemPositionOnlyHardware, hardware_interface::SystemInterface)
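
The `read()` implementation above simulates motion with a first-order lag: each cycle a joint moves a fraction `1/hw_slowdown_` of the remaining distance toward its command. A small Python sketch of that update rule, with illustrative values:
```python
def simulate_read(states, commands, slowdown):
    # One hardware-interface read() cycle: move each joint a fraction
    # 1/slowdown of the remaining distance toward its command.
    return [s + (c - s) / slowdown for s, c in zip(states, commands)]


states = [0.0, 0.0]
commands = [1.0, -0.5]
for _ in range(5):
    states = simulate_read(states, commands, slowdown=2.0)
print(states)  # converges geometrically toward [1.0, -0.5]
```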
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/no_physics_plugin/no_physics_plugin_with_blocks.cc b/mirage/mirage/ros_ws/src/gazebo_env/no_physics_plugin/no_physics_plugin_with_blocks.cc
deleted file mode 100644
index 6c624fa..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/no_physics_plugin/no_physics_plugin_with_blocks.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-#ifndef _NOPHYSICS_PLUGIN_HH_
-#define _NOPHYSICS_PLUGIN_HH_
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace gazebo
-{
- /// \brief A plugin to control a NoPhysics sensor.
- class NoPhysicsPlugin : public ModelPlugin
- {
- /// \brief Constructor
- public: NoPhysicsPlugin() {}
-
- /// \brief The load function is called by Gazebo when the plugin is
- /// inserted into simulation
- /// \param[in] _model A pointer to the model that this plugin is
- /// attached to.
- /// \param[in] _sdf A pointer to the plugin's SDF element.
- public: virtual void Load(physics::ModelPtr _model, sdf::ElementPtr _sdf)
- {
- // Initialize ROS node
- this->node_ = gazebo_ros::Node::Get(_sdf);
- const gazebo_ros::QoS &qos = this->node_->get_qos();
- // Just output a message for now
- std::cerr << "NO PHYSICS" << "\n";
- _model->GetWorld()->SetPhysicsEnabled(false);
-      this->ee_pose_subscriber_ = this->node_->create_subscription<std_msgs::msg::String>(
- "ee_pose",
- qos.get_subscription_qos("ee_pose", rclcpp::QoS(1)),
- std::bind(&NoPhysicsPlugin::eePoseMsg, this, std::placeholders::_1));
-
-      this->command_subscriber_ = this->node_->create_subscription<std_msgs::msg::Float64MultiArray>(
- "forward_position_controller/commands",
- qos.get_subscription_qos("forward_position_controller/commands", rclcpp::QoS(1)),
- std::bind(&NoPhysicsPlugin::controllerCommandMsg, this, std::placeholders::_1));
-      this->ee_pose_publisher_ = this->node_->create_publisher<std_msgs::msg::String>("ee_pose", 10);
- }
-
- void eePoseMsg(const std_msgs::msg::String::SharedPtr msg) {
- if(!set_position_) {
- current_box_str = original_box_str_;
- auto found_name = current_box_str.find("my_model1");
- current_box_str.replace(found_name + 8,1,std::to_string(2 * i_ + 3));
- red_box_name_ = current_box_str.substr(found_name,9);
- auto found_pose = current_box_str.find("");
- current_box_str.replace(found_pose+6, 5, msg->data);
-
- physics::WorldPtr world = physics::get_world("default");
- /*if(green_box_) {
- std::cout << "Green box name: " << green_box_name_ << std::endl;
- physics::ModelPtr green_box_ptr = world->ModelByName(green_box_name_);
- world->RemoveModel(green_box_name_);
- std::cout << "I removed" << std::endl;
- green_box_ptr = nullptr;
- std::cout << "Removed model" << std::endl;
- green_box_ = false;
- }*/
- std::cout << "Putting down red box" << std::endl;
- world->InsertModelString(current_box_str);
- set_position_ = true;
- }
- }
-
- void controllerCommandMsg(const std_msgs::msg::Float64MultiArray::SharedPtr msg) {
- green_box_name_ = red_box_name_;
- green_box_name_.replace(name_length_-1,1,std::to_string(2 * i_ + 4));
- auto found_name = current_box_str.find(red_box_name_);
- current_box_str.replace(found_name, 9, green_box_name_);
- auto found_color = current_box_str.find("Red");
- current_box_str.replace(found_color, 3, "Green");
- physics::WorldPtr world = physics::get_world("default");
- world->InsertModelString(current_box_str);
- std::cout << "Inserting green box" << std::endl;
- world->RemoveModel(red_box_name_);
- green_box_ = true;
- set_position_ = false;
- i_++;
- std::cout << i_ << std::endl;
- usleep(1500000);
- std::cout << "Removing now" << std::endl;
- std::cout << "Green box name: " << green_box_name_ << std::endl;
- physics::ModelPtr green_box_ptr = world->ModelByName(green_box_name_);
- world->RemoveModel(green_box_name_);
- std::cout << "I removed" << std::endl;
- green_box_ptr = nullptr;
- std::cout << "Removed model" << std::endl;
- if(i_ < length_) {
- std::string new_ee_pose = std::to_string(x_[i_]) + " " + std::to_string(y_[i_]) + " " + std::to_string(z_[i_]);
- auto message = std_msgs::msg::String();
- message.data = new_ee_pose;
- ee_pose_publisher_->publish(message);
- }
- }
-
-
- private:
- bool green_box_ = false;
- std::string red_box_name_ = "my_model1";
- std::string green_box_name_ = "";
- int name_length_ = 9;
- int i_ = -1;
- int length_ = 3;
- float x_[3] = {0.4,-0.5,0.817};
- float y_[3] = {0.4,0.6,0.233};
- float z_[3] = {0.4,0.3,0.063};
- gazebo_ros::Node::SharedPtr node_;
-      rclcpp::Subscription<std_msgs::msg::String>::SharedPtr ee_pose_subscriber_;
-      rclcpp::Subscription<std_msgs::msg::Float64MultiArray>::SharedPtr command_subscriber_;
-      rclcpp::Publisher<std_msgs::msg::String>::SharedPtr ee_pose_publisher_;
- bool set_position_ = false;
- std::string original_box_str_ = "\n"
-"\n"
-" \n"
-" 0 0 0 0 0 0\n"
-" true\n"
-" \n"
-" \n"
-" 1.0\n"
-" \n"
-" \n"
-" 0.083 \n"
-" 0.0 \n"
-" 0.0 \n"
-" 0.083 \n"
-" 0.0 \n"
-" 0.083 \n"
-" \n"
-" \n"
-" \n"
-" \n"
-" \n"
-" 0.05 0.05 0.05\n"
-" \n"
-" \n"
-" \n"
-" \n"
-" \n"
-" \n"
-" 0.05 0.05 0.05\n"
-" \n"
-" \n"
-" \n"
-" \n"
-" \n"
-" \n"
-" \n"
-" \n"
-"";
- std::string current_box_str = original_box_str_;
-
- };
-
- // Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin.
- GZ_REGISTER_MODEL_PLUGIN(NoPhysicsPlugin)
-}
-#endif
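
The deleted NoPhysicsPlugin above animated its demo by string-editing a cached SDF model: it named the red box `my_model{2*i+3}`, derived a green copy named `my_model{2*i+4}` with its material swapped, and inserted/removed the models from the world. A Python sketch of that string surgery; the SDF stand-in is hypothetical, since the plugin's embedded XML was stripped in extraction:
```python
def green_from_red(red_sdf: str, i: int) -> str:
    # Mirror the plugin: rename my_model{2*i+3} -> my_model{2*i+4}
    # and swap the first "Red" material for "Green".
    renamed = red_sdf.replace(f"my_model{2 * i + 3}", f"my_model{2 * i + 4}")
    return renamed.replace("Red", "Green", 1)


# Hypothetical stand-in for original_box_str_ (the real SDF tags were lost):
red_box_sdf = 'model "my_model3" ... material Red ...'
print(green_from_red(red_box_sdf, 0))  # model "my_model4" ... material Green ...
```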
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_early_inpaint_control_plugin/panda_and_panda_grippers_early_inpaint_control_plugin.cc b/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_to_ur5_gripper_early_inpaint_control_plugin/panda_and_panda_grippers_to_ur5_gripper_early_inpaint_control_plugin.cc
similarity index 85%
rename from mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_early_inpaint_control_plugin/panda_and_panda_grippers_early_inpaint_control_plugin.cc
rename to mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_to_ur5_gripper_early_inpaint_control_plugin/panda_and_panda_grippers_to_ur5_gripper_early_inpaint_control_plugin.cc
index 5ba9e9d..b62941f 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_early_inpaint_control_plugin/panda_and_panda_grippers_early_inpaint_control_plugin.cc
+++ b/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_to_ur5_gripper_early_inpaint_control_plugin/panda_and_panda_grippers_to_ur5_gripper_early_inpaint_control_plugin.cc
@@ -17,10 +17,10 @@
namespace gazebo
{
/// \brief A plugin to control a NoPhysics sensor.
- class PandaAndPandaGrippersControlEarlyInpaintPlugin : public ModelPlugin
+ class PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin : public ModelPlugin
{
/// \brief Constructor
- public: PandaAndPandaGrippersControlEarlyInpaintPlugin() {}
+ public: PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin() {}
/// \brief The load function is called by Gazebo when the plugin is
/// inserted into simulation
@@ -38,7 +38,7 @@ namespace gazebo
      this->robot_subscriber_ = this->node_->create_subscription<std_msgs::msg::Float64MultiArray>(
"joint_commands",
qos.get_subscription_qos("joint_commands", rclcpp::QoS(1)),
- std::bind(&PandaAndPandaGrippersControlEarlyInpaintPlugin::jointCommandMsg, this, std::placeholders::_1));
+ std::bind(&PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin::jointCommandMsg, this, std::placeholders::_1));
      this->gazebo_joint_state_publisher_ = this->node_->create_publisher<sensor_msgs::msg::JointState>("/gazebo_joint_states",1);
// ROS2 Message Filter Tutorial: https://answers.ros.org/question/366440/ros-2-message_filters-timesynchronizer-minimal-example-does-not-reach-callback-function/
@@ -47,7 +47,7 @@ namespace gazebo
this->rgb_subscriber_.subscribe(this->node_, "/panda_camera/image_raw", rmw_qos_profile);
this->depth_subscriber_.subscribe(this->node_, "/panda_camera/depth/image_raw", rmw_qos_profile);
      this->time_synchronizer_ = std::make_shared<message_filters::TimeSynchronizer<sensor_msgs::msg::Image, sensor_msgs::msg::Image>>(rgb_subscriber_, depth_subscriber_,1);
- this->time_synchronizer_->registerCallback(std::bind(&PandaAndPandaGrippersControlEarlyInpaintPlugin::imageCallback, this, std::placeholders::_1, std::placeholders::_2));
+ this->time_synchronizer_->registerCallback(std::bind(&PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin::imageCallback, this, std::placeholders::_1, std::placeholders::_2));
}
@@ -58,6 +58,12 @@ namespace gazebo
}
void jointCommandMsg(const std_msgs::msg::Float64MultiArray::SharedPtr msg) {
std::cout << "Start publish" << std::endl;
+ int command_size = static_cast<int>(msg->data.size());
+ std::cout << "Command size: " << command_size << std::endl;
+ for(int i = 0; i < command_size; i++) {
+ std::cout << "Command: " << msg->data[i] << std::endl;
+ }
+
physics::WorldPtr world = physics::get_world("default");
auto model_ptr = world->ModelByName(model_name_);
auto panda_with_ur5_gripper_joint1 = model_ptr->GetJoint("panda_with_ur5_gripper_joint1");
@@ -92,6 +98,7 @@ namespace gazebo
auto panda_no_gripper_joint5 = model_ptr->GetJoint("panda_no_gripper_joint5");
auto panda_no_gripper_joint6 = model_ptr->GetJoint("panda_no_gripper_joint6");
auto panda_no_gripper_joint7 = model_ptr->GetJoint("panda_no_gripper_joint7");
+
// Needs to coordinate with custon_joint_state_publisher_node.py
panda_joint1->SetPosition(0,msg->data[0]);
panda_joint2->SetPosition(0,msg->data[1]);
@@ -100,7 +107,7 @@ namespace gazebo
panda_joint5->SetPosition(0,msg->data[4]);
panda_joint6->SetPosition(0,msg->data[5]);
panda_joint7->SetPosition(0,msg->data[6]);
-
+
panda_no_gripper_joint1->SetPosition(0,msg->data[0]);
panda_no_gripper_joint2->SetPosition(0,msg->data[1]);
panda_no_gripper_joint3->SetPosition(0,msg->data[2]);
@@ -108,7 +115,8 @@ namespace gazebo
panda_no_gripper_joint5->SetPosition(0,msg->data[4]);
panda_no_gripper_joint6->SetPosition(0,msg->data[5]);
panda_no_gripper_joint7->SetPosition(0,msg->data[6]);
-
+ std::cout << "This far" << std::endl;
+
panda_with_ur5_gripper_joint1->SetPosition(0,msg->data[7]);
panda_with_ur5_gripper_joint2->SetPosition(0,msg->data[8]);
panda_with_ur5_gripper_joint3->SetPosition(0,msg->data[9]);
@@ -116,20 +124,22 @@ namespace gazebo
panda_with_ur5_gripper_joint5->SetPosition(0,msg->data[11]);
panda_with_ur5_gripper_joint6->SetPosition(0,msg->data[12]);
panda_with_ur5_gripper_joint7->SetPosition(0,msg->data[13]);
+
- left_knuckle->SetPosition(0,0.0);
- right_knuckle->SetPosition(0,0.0);
- left_inner_knuckle->SetPosition(0,0.0);
- right_inner_knuckle->SetPosition(0,0.0);
- left_finger->SetPosition(0,0.0);
- right_finger->SetPosition(0,0.0);
+ left_knuckle->SetPosition(0,msg->data[16]);
+ right_knuckle->SetPosition(0,msg->data[17]);
+ left_inner_knuckle->SetPosition(0,msg->data[18]);
+ right_inner_knuckle->SetPosition(0,msg->data[19]);
+ left_finger->SetPosition(0,-msg->data[20]);
+ right_finger->SetPosition(0,-msg->data[21]);
- panda_finger_joint1->SetPosition(0,0.04);
- panda_finger_joint2->SetPosition(0,0.04);
+ panda_finger_joint1->SetPosition(0,msg->data[14]);
+ panda_finger_joint2->SetPosition(0,msg->data[15]);
+
auto message = sensor_msgs::msg::JointState();
message.header.stamp = this->node_->get_clock() ->now();
message.name = {"panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7","panda_no_gripper_joint1","panda_no_gripper_joint2","panda_no_gripper_joint3","panda_no_gripper_joint4","panda_no_gripper_joint5","panda_no_gripper_joint6","panda_no_gripper_joint7","panda_with_ur5_gripper_joint1", "panda_with_ur5_gripper_joint2", "panda_with_ur5_gripper_joint3","panda_with_ur5_gripper_joint4", "panda_with_ur5_gripper_joint5", "panda_with_ur5_gripper_joint6","panda_with_ur5_gripper_joint7","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint","panda_finger_joint1","panda_finger_joint2"};
- message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12],msg->data[13],0.0,0.0,0.0,0.0,0.0,0.0,0.04,0.04};
+ message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12],msg->data[13],msg->data[16],msg->data[17],msg->data[18],msg->data[19],-msg->data[20],-msg->data[21],msg->data[14],msg->data[15]};
//Delay for image to catch up to joint position update
usleep(500000);
this->gazebo_joint_state_publisher_->publish(message);
@@ -151,6 +161,6 @@ namespace gazebo
};
// Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin.
- GZ_REGISTER_MODEL_PLUGIN(PandaAndPandaGrippersControlEarlyInpaintPlugin)
+ GZ_REGISTER_MODEL_PLUGIN(PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin)
}
#endif
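
The renamed early-inpaint plugin now consumes a 22-element command vector; the index layout below is read directly off the `SetPosition` calls in this diff. A short sketch assembling such a message (all joint values are placeholders):
```python
from std_msgs.msg import Float64MultiArray

panda_arm      = [0.0] * 7     # data[0:7]   -> panda_joint1..7 (also mirrored to panda_no_gripper_*)
panda_ur5g_arm = [0.0] * 7     # data[7:14]  -> panda_with_ur5_gripper_joint1..7
panda_fingers  = [0.04, 0.04]  # data[14:16] -> panda_finger_joint1/2
robotiq_joints = [0.0] * 6     # data[16:22] -> knuckle/inner-knuckle/finger-tip joints
                               # (the plugin negates data[20] and data[21])

msg = Float64MultiArray()
msg.data = panda_arm + panda_ur5g_arm + panda_fingers + robotiq_joints
assert len(msg.data) == 22
```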
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_control_plugin/panda_and_panda_grippers_control_plugin.cc b/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_to_ur5_gripper_wrist_early_inpaint_control_plugin/panda_and_panda_grippers_to_ur5_gripper_wrist_early_inpaint_control_plugin.cc
similarity index 64%
rename from mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_control_plugin/panda_and_panda_grippers_control_plugin.cc
rename to mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_to_ur5_gripper_wrist_early_inpaint_control_plugin/panda_and_panda_grippers_to_ur5_gripper_wrist_early_inpaint_control_plugin.cc
index 9e7bded..ca7db1e 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_control_plugin/panda_and_panda_grippers_control_plugin.cc
+++ b/mirage/mirage/ros_ws/src/gazebo_env/panda_and_panda_grippers_to_ur5_gripper_wrist_early_inpaint_control_plugin/panda_and_panda_grippers_to_ur5_gripper_wrist_early_inpaint_control_plugin.cc
@@ -17,10 +17,10 @@
namespace gazebo
{
/// \brief A plugin to control a NoPhysics sensor.
- class PandaAndPandaGrippersControlPlugin : public ModelPlugin
+ class PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin : public ModelPlugin
{
/// \brief Constructor
- public: PandaAndPandaGrippersControlPlugin() {}
+ public: PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin() {}
/// \brief The load function is called by Gazebo when the plugin is
/// inserted into simulation
@@ -34,11 +34,11 @@ namespace gazebo
this->model_name_ = _model->GetName();
const gazebo_ros::QoS &qos = this->node_->get_qos();
// Just output a message for now
- std::cerr << "KUSHTIMUS PRIME PANDA AND PANDA GRIPPERS" << "\n";
+ std::cerr << "KUSHTIMUS PRIME PANDA AND PANDA EARLY INPAINT GRIPPERS" << "\n";
this->robot_subscriber_ = this->node_->create_subscription<std_msgs::msg::Float64MultiArray>(
"joint_commands",
qos.get_subscription_qos("joint_commands", rclcpp::QoS(1)),
- std::bind(&PandaAndPandaGrippersControlPlugin::jointCommandMsg, this, std::placeholders::_1));
+ std::bind(&PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin::jointCommandMsg, this, std::placeholders::_1));
this->gazebo_joint_state_publisher_ = this->node_->create_publisher<sensor_msgs::msg::JointState>("/gazebo_joint_states",1);
// ROS2 Message Filter Tutorial: https://answers.ros.org/question/366440/ros-2-message_filters-timesynchronizer-minimal-example-does-not-reach-callback-function/
@@ -47,7 +47,7 @@ namespace gazebo
this->rgb_subscriber_.subscribe(this->node_, "/panda_camera/image_raw", rmw_qos_profile);
this->depth_subscriber_.subscribe(this->node_, "/panda_camera/depth/image_raw", rmw_qos_profile);
this->time_synchronizer_ = std::make_shared<message_filters::TimeSynchronizer<sensor_msgs::msg::Image,sensor_msgs::msg::Image>>(rgb_subscriber_, depth_subscriber_,1);
- this->time_synchronizer_->registerCallback(std::bind(&PandaAndPandaGrippersControlPlugin::imageCallback, this, std::placeholders::_1, std::placeholders::_2));
+ this->time_synchronizer_->registerCallback(std::bind(&PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin::imageCallback, this, std::placeholders::_1, std::placeholders::_2));
}
@@ -58,15 +58,21 @@ namespace gazebo
}
void jointCommandMsg(const std_msgs::msg::Float64MultiArray::SharedPtr msg) {
std::cout << "Start publish" << std::endl;
+ // msg->data is a std::vector, so query size() directly; the old
+ // sizeof(msg->data) / sizeof(msg->data[0]) idiom is wrong for vectors
+ int command_size = static_cast<int>(msg->data.size());
+ std::cout << "Command size: " << command_size << std::endl;
+ for(int i = 0; i < command_size; i++) {
+ std::cout << "Command: " << msg->data[i] << std::endl;
+ }
+
physics::WorldPtr world = physics::get_world("default");
auto model_ptr = world->ModelByName(model_name_);
- auto panda_with_ur5_gripper_joint1 = model_ptr->GetJoint("panda_with_ur5_gripper_joint1");
- auto panda_with_ur5_gripper_joint2 = model_ptr->GetJoint("panda_with_ur5_gripper_joint2");
- auto panda_with_ur5_gripper_joint3 = model_ptr->GetJoint("panda_with_ur5_gripper_joint3");
- auto panda_with_ur5_gripper_joint4 = model_ptr->GetJoint("panda_with_ur5_gripper_joint4");
- auto panda_with_ur5_gripper_joint5 = model_ptr->GetJoint("panda_with_ur5_gripper_joint5");
- auto panda_with_ur5_gripper_joint6 = model_ptr->GetJoint("panda_with_ur5_gripper_joint6");
- auto panda_with_ur5_gripper_joint7 = model_ptr->GetJoint("panda_with_ur5_gripper_joint7");
+ auto panda_with_ur5_gripper_joint1 = model_ptr->GetJoint("panda_with_ur5_gripper_wrist_joint1");
+ auto panda_with_ur5_gripper_joint2 = model_ptr->GetJoint("panda_with_ur5_gripper_wrist_joint2");
+ auto panda_with_ur5_gripper_joint3 = model_ptr->GetJoint("panda_with_ur5_gripper_wrist_joint3");
+ auto panda_with_ur5_gripper_joint4 = model_ptr->GetJoint("panda_with_ur5_gripper_wrist_joint4");
+ auto panda_with_ur5_gripper_joint5 = model_ptr->GetJoint("panda_with_ur5_gripper_wrist_joint5");
+ auto panda_with_ur5_gripper_joint6 = model_ptr->GetJoint("panda_with_ur5_gripper_wrist_joint6");
+ auto panda_with_ur5_gripper_joint7 = model_ptr->GetJoint("panda_with_ur5_gripper_wrist_joint7");
auto left_knuckle = model_ptr->GetJoint("robotiq_85_left_knuckle_joint");
auto right_knuckle = model_ptr->GetJoint("robotiq_85_right_knuckle_joint");
@@ -84,6 +90,15 @@ namespace gazebo
auto panda_joint7 = model_ptr->GetJoint("panda_joint7");
auto panda_finger_joint1 = model_ptr->GetJoint("panda_finger_joint1");
auto panda_finger_joint2 = model_ptr->GetJoint("panda_finger_joint2");
+
+ auto panda_no_gripper_joint1 = model_ptr->GetJoint("panda_no_gripper_joint1");
+ auto panda_no_gripper_joint2 = model_ptr->GetJoint("panda_no_gripper_joint2");
+ auto panda_no_gripper_joint3 = model_ptr->GetJoint("panda_no_gripper_joint3");
+ auto panda_no_gripper_joint4 = model_ptr->GetJoint("panda_no_gripper_joint4");
+ auto panda_no_gripper_joint5 = model_ptr->GetJoint("panda_no_gripper_joint5");
+ auto panda_no_gripper_joint6 = model_ptr->GetJoint("panda_no_gripper_joint6");
+ auto panda_no_gripper_joint7 = model_ptr->GetJoint("panda_no_gripper_joint7");
+
// Needs to coordinate with custon_joint_state_publisher_node.py
panda_joint1->SetPosition(0,msg->data[0]);
panda_joint2->SetPosition(0,msg->data[1]);
@@ -92,7 +107,16 @@ namespace gazebo
panda_joint5->SetPosition(0,msg->data[4]);
panda_joint6->SetPosition(0,msg->data[5]);
panda_joint7->SetPosition(0,msg->data[6]);
-
+
+ panda_no_gripper_joint1->SetPosition(0,msg->data[0]);
+ panda_no_gripper_joint2->SetPosition(0,msg->data[1]);
+ panda_no_gripper_joint3->SetPosition(0,msg->data[2]);
+ panda_no_gripper_joint4->SetPosition(0,msg->data[3]);
+ panda_no_gripper_joint5->SetPosition(0,msg->data[4]);
+ panda_no_gripper_joint6->SetPosition(0,msg->data[5]);
+ panda_no_gripper_joint7->SetPosition(0,msg->data[6]);
+
+
panda_with_ur5_gripper_joint1->SetPosition(0,msg->data[7]);
panda_with_ur5_gripper_joint2->SetPosition(0,msg->data[8]);
panda_with_ur5_gripper_joint3->SetPosition(0,msg->data[9]);
@@ -100,20 +124,23 @@ namespace gazebo
panda_with_ur5_gripper_joint5->SetPosition(0,msg->data[11]);
panda_with_ur5_gripper_joint6->SetPosition(0,msg->data[12]);
panda_with_ur5_gripper_joint7->SetPosition(0,msg->data[13]);
+ std::cout << "This far" << std::endl;
+
- left_knuckle->SetPosition(0,0.0);
- right_knuckle->SetPosition(0,0.0);
- left_inner_knuckle->SetPosition(0,0.0);
- right_inner_knuckle->SetPosition(0,0.0);
- left_finger->SetPosition(0,0.0);
- right_finger->SetPosition(0,0.0);
+ left_knuckle->SetPosition(0,msg->data[16]);
+ right_knuckle->SetPosition(0,msg->data[17]);
+ left_inner_knuckle->SetPosition(0,msg->data[18]);
+ right_inner_knuckle->SetPosition(0,msg->data[19]);
+ left_finger->SetPosition(0,-msg->data[20]);
+ right_finger->SetPosition(0,-msg->data[21]);
- panda_finger_joint1->SetPosition(0,0.04);
- panda_finger_joint2->SetPosition(0,0.04);
+ panda_finger_joint1->SetPosition(0,msg->data[14]);
+ panda_finger_joint2->SetPosition(0,msg->data[15]);
+
auto message = sensor_msgs::msg::JointState();
message.header.stamp = this->node_->get_clock()->now();
- message.name = {"panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7","panda_with_ur5_gripper_joint1", "panda_with_ur5_gripper_joint2", "panda_with_ur5_gripper_joint3","panda_with_ur5_gripper_joint4", "panda_with_ur5_gripper_joint5", "panda_with_ur5_gripper_joint6","panda_with_ur5_gripper_joint7","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint","panda_finger_joint1","panda_finger_joint2"};
- message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12],msg->data[13],0.0,0.0,0.0,0.0,0.0,0.0,0.04,0.04};
+ message.name = {"panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7","panda_no_gripper_joint1","panda_no_gripper_joint2","panda_no_gripper_joint3","panda_no_gripper_joint4","panda_no_gripper_joint5","panda_no_gripper_joint6","panda_no_gripper_joint7","panda_with_ur5_gripper_joint1", "panda_with_ur5_gripper_joint2", "panda_with_ur5_gripper_joint3","panda_with_ur5_gripper_joint4", "panda_with_ur5_gripper_joint5", "panda_with_ur5_gripper_joint6","panda_with_ur5_gripper_joint7","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint","panda_finger_joint1","panda_finger_joint2"};
+ message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12],msg->data[13],msg->data[16],msg->data[17],msg->data[18],msg->data[19],-msg->data[20],-msg->data[21],msg->data[14],msg->data[15]};
//Delay for image to catch up to joint position update
usleep(500000);
this->gazebo_joint_state_publisher_->publish(message);
@@ -135,6 +162,6 @@ namespace gazebo
};
// Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin.
- GZ_REGISTER_MODEL_PLUGIN(PandaAndPandaGrippersControlPlugin)
+ GZ_REGISTER_MODEL_PLUGIN(PandaAndPandaGrippersToUR5GripperControlEarlyInpaintPlugin)
}
#endif
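For reference, the 22-element layout that both plugins above expect on `/joint_commands` can be read off the `SetPosition` calls; a publisher sketch with the indices grouped and named (only the ordering comes from the plugin code, the group names are mine):

```python
#!/usr/bin/env python3
# Sketch of the 22-element /joint_commands layout decoded by the plugins:
#   data[0:7]   Panda arm, mirrored onto panda_no_gripper_joint1..7
#   data[7:14]  Panda arm with UR5 gripper (wrist variant: *_wrist_joint1..7)
#   data[14:16] panda_finger_joint1/2
#   data[16:22] Robotiq 85 joints; the plugin negates data[20] and data[21]
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64MultiArray

def main():
    rclpy.init()
    node = Node('joint_command_layout_demo')
    pub = node.create_publisher(Float64MultiArray, '/joint_commands', 1)
    arm = [0.0] * 7                 # data[0:7]
    arm_with_gripper = list(arm)    # data[7:14], same pose here
    panda_fingers = [0.04, 0.04]    # data[14:16], open
    robotiq = [0.0] * 6             # data[16:22], open
    msg = Float64MultiArray()
    msg.data = arm + arm_with_gripper + panda_fingers + robotiq
    pub.publish(msg)  # in practice, wait for the subscriber to connect first
    node.destroy_node()
    rclpy.shutdown()

if __name__ == '__main__':
    main()
```

Since the plugin negates `data[20]` and `data[21]` itself, publishers should send the raw Robotiq finger-tip angles.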
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc b/mirage/mirage/ros_ws/src/gazebo_env/panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc
deleted file mode 100644
index 9df1c55..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef _PANDACONTROL_PLUGIN_HH_
-#define _PANDACONTROL_PLUGIN_HH_
-
-#include <functional>
-#include <memory>
-#include <string>
-#include <unistd.h>
-#include <gazebo/gazebo.hh>
-#include <gazebo/physics/physics.hh>
-#include <gazebo_ros/node.hpp>
-#include <rclcpp/rclcpp.hpp>
-#include <std_msgs/msg/float64_multi_array.hpp>
-#include <sensor_msgs/msg/joint_state.hpp>
-#include <sensor_msgs/msg/image.hpp>
-#include <message_filters/subscriber.h>
-#include <message_filters/time_synchronizer.h>
-namespace gazebo
-{
- /// \brief A plugin to control a NoPhysics sensor.
- class PandaControlPlugin : public ModelPlugin
- {
- /// \brief Constructor
- public: PandaControlPlugin() {}
-
- /// \brief The load function is called by Gazebo when the plugin is
- /// inserted into simulation
- /// \param[in] _model A pointer to the model that this plugin is
- /// attached to.
- /// \param[in] _sdf A pointer to the plugin's SDF element.
- public: virtual void Load(physics::ModelPtr _model, sdf::ElementPtr _sdf)
- {
- // Initialize ROS node
- this->node_ = gazebo_ros::Node::Get(_sdf);
- this->model_name_ = _model->GetName();
- const gazebo_ros::QoS &qos = this->node_->get_qos();
- // Just output a message for now
- std::cerr << "KUSHTIMUS PRIME PANDA NO GRIPPER" << "\n";
- this->robot_subscriber_ = this->node_->create_subscription<std_msgs::msg::Float64MultiArray>(
- "joint_commands",
- qos.get_subscription_qos("joint_commands", rclcpp::QoS(1)),
- std::bind(&PandaControlPlugin::jointCommandMsg, this, std::placeholders::_1));
- this->gazebo_joint_state_publisher_ = this->node_->create_publisher<sensor_msgs::msg::JointState>("/gazebo_joint_states",1);
-
- // ROS2 Message Filter Tutorial: https://answers.ros.org/question/366440/ros-2-message_filters-timesynchronizer-minimal-example-does-not-reach-callback-function/
- rclcpp::QoS qos_(1);
- auto rmw_qos_profile = qos_.get_rmw_qos_profile();
- this->rgb_subscriber_.subscribe(this->node_, "/panda_camera/image_raw", rmw_qos_profile);
- this->depth_subscriber_.subscribe(this->node_, "/panda_camera/depth/image_raw", rmw_qos_profile);
- this->time_synchronizer_ = std::make_shared<message_filters::TimeSynchronizer<sensor_msgs::msg::Image,sensor_msgs::msg::Image>>(rgb_subscriber_, depth_subscriber_,1);
- this->time_synchronizer_->registerCallback(std::bind(&PandaControlPlugin::imageCallback, this, std::placeholders::_1, std::placeholders::_2));
-
- }
-
- //Needs to be const ConstSharedPtr: https://robotics.stackexchange.com/questions/102503/ros2-message-filters-synchronizer-compilation-error
- void imageCallback(const sensor_msgs::msg::Image::ConstSharedPtr rgb_msg,const sensor_msgs::msg::Image::ConstSharedPtr depth_msg) {
- this->rgb_msg_ = rgb_msg;
- this->depth_msg_ = depth_msg;
- }
- void jointCommandMsg(const std_msgs::msg::Float64MultiArray::SharedPtr msg) {
- std::cout << "Start publish" << std::endl;
- physics::WorldPtr world = physics::get_world("default");
- auto model_ptr = world->ModelByName(model_name_);
- auto panda_joint1 = model_ptr->GetJoint("panda_joint1");
- auto panda_joint2 = model_ptr->GetJoint("panda_joint2");
- auto panda_joint3 = model_ptr->GetJoint("panda_joint3");
- auto panda_joint4 = model_ptr->GetJoint("panda_joint4");
- auto panda_joint5 = model_ptr->GetJoint("panda_joint5");
- auto panda_joint6 = model_ptr->GetJoint("panda_joint6");
- auto panda_joint7 = model_ptr->GetJoint("panda_joint7");
- // Needs to coordinate with custon_joint_state_publisher_node.py
- panda_joint1->SetPosition(0,msg->data[0]);
- panda_joint2->SetPosition(0,msg->data[1]);
- panda_joint3->SetPosition(0,msg->data[2]);
- panda_joint4->SetPosition(0,msg->data[3]);
- panda_joint5->SetPosition(0,msg->data[4]);
- panda_joint6->SetPosition(0,msg->data[5]);
- panda_joint7->SetPosition(0,msg->data[6]);
- auto message = sensor_msgs::msg::JointState();
- message.header.stamp = this->node_->get_clock()->now();
- message.name = {"panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7"};
- message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6]};
- //Delay for image to catch up to joint position update
- //usleep(100000);
- this->gazebo_joint_state_publisher_->publish(message);
- std::cout << "Finished publish" << std::endl;
- }
-
-
- private:
- gazebo_ros::Node::SharedPtr node_;
- std::string model_name_;
- rclcpp::Subscription<std_msgs::msg::Float64MultiArray>::SharedPtr robot_subscriber_;
- rclcpp::Publisher<sensor_msgs::msg::JointState>::SharedPtr gazebo_joint_state_publisher_;
- message_filters::Subscriber<sensor_msgs::msg::Image> rgb_subscriber_;
- message_filters::Subscriber<sensor_msgs::msg::Image> depth_subscriber_;
- std::shared_ptr<message_filters::TimeSynchronizer<sensor_msgs::msg::Image,sensor_msgs::msg::Image>> time_synchronizer_;
- sensor_msgs::msg::Image::ConstSharedPtr rgb_msg_;
- sensor_msgs::msg::Image::ConstSharedPtr depth_msg_;
-
- };
-
- // Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin.
- GZ_REGISTER_MODEL_PLUGIN(PandaControlPlugin)
-}
-#endif
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/real_check/check_panda.py b/mirage/mirage/ros_ws/src/gazebo_env/real_check/check_panda.py
deleted file mode 100644
index f40f4b0..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/real_check/check_panda.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import numpy as np
-import cv2
-check_panda_np = np.load('check_panda.npy',allow_pickle=True)
-check_panda_dict = check_panda_np.tolist()
-cam_extrinsic_matrix = check_panda_dict['cam_extrinsic_matrix']
-cam_intrinsic_matrix = check_panda_dict['cam_intrinsic_matrix']
-img0 = check_panda_dict['img_0']
-img1 = check_panda_dict['img_1']
-img2 = check_panda_dict['img_2']
-img3 = check_panda_dict['img_3']
-img4 = check_panda_dict['img_4']
-
-cv2.imwrite('/home/benchturtle/img0.png',img0['img'])
-cv2.imwrite('/home/benchturtle/img1.png',img1['img'])
-cv2.imwrite('/home/benchturtle/img2.png',img2['img'])
-cv2.imwrite('/home/benchturtle/img3.png',img3['img'])
-cv2.imwrite('/home/benchturtle/img4.png',img4['img'])
-imgs = [img0,img1,img2,img3,img4]
-import pdb
-pdb.set_trace()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/real_check/check_ur5e.py b/mirage/mirage/ros_ws/src/gazebo_env/real_check/check_ur5e.py
deleted file mode 100644
index 46ef165..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/real_check/check_ur5e.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import numpy as np
-import cv2
-check_panda_np = np.load('check_ur5.npy',allow_pickle=True)
-check_panda_dict = check_panda_np.tolist()
-cam_extrinsic_matrix = check_panda_dict['cam_extrinsic_matrix']
-cam_intrinsic_matrix = check_panda_dict['cam_intrinsic_matrix']
-img0 = check_panda_dict['img_0']
-img1 = check_panda_dict['img_1']
-img2 = check_panda_dict['img_2']
-img3 = check_panda_dict['img_3']
-img4 = check_panda_dict['img_4']
-imgs = [img0,img1,img2,img3,img4]
-for i in range(len(imgs)):
- img_data = imgs[i]
- rgb = img_data['img']
- depth = img_data['depth']
- joint_angles = img_data['joint_angles']
- ee_pose = img_data['ee_pose']
- cv2.imwrite('ur5e_check_'+str(i)+'.png',rgb)
- np.save('ur5e_depth_'+str(i)+'.npy',depth)
- print("IMG " + str(i))
- print("Joint angles: " + str(joint_angles))
- print("EE Pose: " + str(ee_pose))
- print("\n")
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_hand_to_ee_panda.py b/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_hand_to_ee_panda.py
deleted file mode 100644
index 05c106f..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_hand_to_ee_panda.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import numpy as np
-
-hand_to_world = np.array(
- [
- [0.982, -0.136, 0.129, -0.045],
- [-0.136, -0.991, -0.011, -0.032],
- [0.129, -0.006, -0.992 , 1.110],
- [0.000, 0.000, 0.000, 1.000],
- ]
-)
-
-
-
-world_to_ee = np.array(
- [
- [0.982, -0.136, 0.129, -0.09123698],
- [-0.136, -0.991, -0.006, -0.03099231],
- [0.129, -0.011, -0.992 , 1.01004142],
- [0.000, 0.000, 0.000, 1.000],
- ]
-)
-print(hand_to_world @ world_to_ee)
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_tool0_to_ee_ur5e.py b/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_tool0_to_ee_ur5e.py
deleted file mode 100644
index 53c5e11..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_tool0_to_ee_ur5e.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import numpy as np
-
-tool0_to_world = np.array(
- [
- [0.982, -0.135, 0.133, -0.048],
- [-0.135, -0.991, -0.010, -0.032],
- [0.134, -0.008, -0.991, 1.155],
- [0.000, 0.000, 0.000, 1.000],
- ]
-)
-world_to_ee = np.array(
- [
- [0.982, -0.135, 0.134, -0.09123698],
- [-0.135, -0.991, -0.008, -0.03099231],
- [0.133 ,-0.010 ,-0.991 , 1.01004142],
- [0.000, 0.000, 0.000, 1.000],
- ]
-)
-print(tool0_to_world @ world_to_ee)
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_world_to_cam_robosuite.py b/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_world_to_cam_robosuite.py
deleted file mode 100644
index 66f2321..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_find_world_to_cam_robosuite.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import numpy as np
-
-world_to_image = np.array(
- [
- [0, 0.70614724 , -0.70806503,0.5],
- [1, 0, 0,0],
- [0, -0.70806503, -0.70614724,1.35],
- [0,0,0,1]
- ]
-)
-image_to_cam = np.array(
- [
- [0,-1,0,0],
- [0,0,-1,0],
- [1,0,0,0],
- [0,0,0,1]
- ]
-)
-world_to_cam = world_to_image @ image_to_cam
-cam_to_world = np.linalg.inv(world_to_cam)
-print(cam_to_world)
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test1.py b/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test1.py
deleted file mode 100644
index 091b56c..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test1.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import cv2
-import numpy as np
-import json
-
-panda_output = np.load('Panda_output.npy',allow_pickle=True)
-ur5e_output = np.load('UR5e_output1.npy',allow_pickle=True)
-# Convert dictionary to a string
-dict_str = str(panda_output[0])
-
-# Write the dictionary string to a text file
-with open('panda.txt', 'w') as file:
- file.write(dict_str)
-
-# Convert dictionary to a string
-dict_str = str(ur5e_output[0])
-
-# Write the dictionary string to a text file
-with open('ur5e.txt', 'w') as file:
- file.write(dict_str)
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test2.py b/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test2.py
deleted file mode 100644
index 9ccaa60..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test2.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import cv2
-import numpy as np
-import json
-
-panda_output = np.load('Panda_output.npy',allow_pickle=True)
-# Convert dictionary to a string
-panda_output_dict = panda_output[6]
-joints = panda_output_dict['joint_angles']
-print(joints)
-ee_pos = panda_output_dict['robot_eef_pos']
-print(ee_pos)
-ee_quat = panda_output_dict['robot_eef_quat']
-print(ee_quat)
-frontview_image = panda_output_dict['frontview']['rgb']
-cv2.imwrite('frontview_image.png',frontview_image)
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test3.py b/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test3.py
deleted file mode 100644
index ba3410c..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test3.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import cv2
-import numpy as np
-import json
-import open3d as o3d
-from scipy.spatial.transform import Rotation as R
-
-
-def project_points_from_world_to_camera(points, world_to_camera_transform, camera_height, camera_width):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogenous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogenous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
-
-# panda_output = np.load('Panda_output.npy',allow_pickle=True)
-panda_output = np.load('Panda_output.npy',allow_pickle=True)
-
-# Convert dictionary to a string
-panda_output_dict = panda_output[6]
-joints = panda_output_dict['joint_angles']
-print(joints)
-ee_pos = panda_output_dict['robot_eef_pos']
-print(ee_pos)
-ee_quat = panda_output_dict['robot_eef_quat']
-print(ee_quat)
-rotation_ee = R.from_quat(ee_quat)
-rotation_matrix_ee = rotation_ee.as_matrix()
-print(rotation_matrix_ee)
-translation_ee = np.array([1.6,0,1.45])
-frontview_image = panda_output_dict['frontview']['rgb']
-cv2.imwrite('frontview_image.png',frontview_image)
-points = panda_output_dict['frontview']['points']
-points_np = np.array(points)
-points_np = points_np[:,:3]
-
-
-# Creating an Open3D PointCloud object from the numpy array
-pcd = o3d.geometry.PointCloud()
-pcd.points = o3d.utility.Vector3dVector(points_np)
-color = [1.0, 0.0, 0.0]
-pcd.colors = o3d.utility.Vector3dVector(np.tile(color, (points_np.shape[0], 1)))
-world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
-pcd.transform(np.linalg.inv(world_to_camera))
-gazebo_output = np.load('panda_gazebo_pointcloud.npy')
-pcd_gazebo = o3d.geometry.PointCloud()
-pcd_gazebo.points = o3d.utility.Vector3dVector(gazebo_output)
-color = [0.0,0.0,1.0]
-pcd_gazebo.colors = o3d.utility.Vector3dVector(np.tile(color, (gazebo_output.shape[0], 1)))
-mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
-# Visualizing the point cloud
-
-gazebo_output = gazebo_output.astype('float64')
-rvec = np.array([0,0,0]).reshape(3,1)
-rvec = rvec.astype('float64')
-tvec = np.array([0,0,0]).reshape(3,1)
-tvec = tvec.astype('float64')
-camera_matrix = np.array([[309.01933598, 0. , 128. ],
- [ 0. , 309.01933598, 128. ],
- [ 0. , 0. , 1. ]])
-dist_coeffs = np.array([0,0,0,0])
-dist_coeffs = dist_coeffs.astype('float64')
-
-image_points,jacobian = cv2.projectPoints(gazebo_output,rvec,tvec,camera_matrix,dist_coeffs)
-gazebo_mask = np.zeros((256,256), dtype=np.uint8)
-int_image_points = np.round(image_points).astype(int)
-int_image_points = int_image_points.reshape(int_image_points.shape[0],2)
-gazebo_mask[int_image_points[:,1],int_image_points[:,0]] = 255
-cv2.imwrite('gazebo_mask.png',gazebo_mask)
-robosuite_seg = panda_output_dict['frontview']['seg']
-robosuite_seg *= 255
-cv2.imwrite('robosuite_mask.png',robosuite_seg)
-cv2.imwrite('diff.png',abs(robosuite_seg.squeeze() - gazebo_mask))
-# o3d.visualization.draw_geometries([pcd,pcd_gazebo,mesh_coordinate_frame])
-camera_transform = np.hstack((camera_matrix,np.array([0,0,0]).reshape(3,1)))
-camera_transform = np.vstack((camera_transform,np.array([0,0,0,1]).reshape(1,4)))
-robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = project_points_from_world_to_camera(gazebo_output,camera_transform,256,256)
-gazebo_camera_matrix = np.array([[528.433756558705,0,320.5],
- [0,528.433756558705,240.5],
- [0,0,1]])
-gazebo_camera_transform = np.hstack((gazebo_camera_matrix,np.array([0,0,0]).reshape(3,1)))
-gazebo_camera_transform = np.vstack((gazebo_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
-_,points_to_pixels_gazebo,pixels_to_points_gazebo = project_points_from_world_to_camera(gazebo_output,gazebo_camera_transform,480,640)
-gazebo_image = cv2.imread('gazebo_image.png')
-inpainted_image = cv2.imread('frontview_image.png')
-for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_image[gazebo_pixel[0],gazebo_pixel[1] - 5,:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- inpainted_image[pixel[0],pixel[1],:] = mean_rgb_value
-cv2.imwrite('inpaint.png',inpainted_image)
-mask2 = np.zeros((256,256), dtype=np.uint8)
-mask2[robosuite_img_points[:,0],robosuite_img_points[:,1]] = 255
-cv2.imwrite('gazebo_robo_diff.png',abs(mask2 - gazebo_mask))
\ No newline at end of file
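The deleted prototype above already contains the essence of cross-painting: project the same 3D points through two pinhole models, then transfer pixel colors from the Gazebo render into the target image wherever correspondences exist. A distilled, self-contained sketch of that correspondence step (function names are mine; the prototype additionally averaged all source pixels that land on one target pixel instead of last-write-wins):

```python
import numpy as np

def project(points, K, height, width):
    """Project Nx3 camera-frame points to integer (row, col) pixel indices."""
    uvw = (K @ points.T).T                        # N x 3 homogeneous pixels
    uv = np.rint(uvw[:, :2] / uvw[:, 2:3]).astype(int)
    rows = uv[:, 1].clip(0, height - 1)
    cols = uv[:, 0].clip(0, width - 1)
    return np.stack([rows, cols], axis=1)

def cross_paint(points, src_K, src_img, dst_K, dst_img):
    """Copy colors from src_img to dst_img at corresponding projections."""
    src_px = project(points, src_K, *src_img.shape[:2])
    dst_px = project(points, dst_K, *dst_img.shape[:2])
    out = dst_img.copy()
    for (sr, sc), (dr, dc) in zip(src_px, dst_px):
        out[dr, dc] = src_img[sr, sc]
    return out
```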
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test4.py b/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test4.py
deleted file mode 100644
index f8e42f9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/robotsuite_outputs/robosuite_test4.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import cv2
-import numpy as np
-import json
-import open3d as o3d
-from scipy.spatial.transform import Rotation as R
-
-def normalize_depth_image(depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
-ur5e_output = np.load('UR5e_output3.npy',allow_pickle=True)
-# Convert dictionary to a string
-ur5e_output_dict = ur5e_output.item()
-joints = ur5e_output_dict['joint_angles']
-np.save('joints.npy',joints)
-print(joints)
-ee_pos = ur5e_output_dict['robot_eef_pos']
-print(ee_pos)
-ee_quat = ur5e_output_dict['robot_eef_quat']
-print(ee_quat)
-rotation_ee = R.from_quat(ee_quat)
-rotation_matrix_ee = rotation_ee.as_matrix()
-print(rotation_matrix_ee)
-translation_ee = np.array([1.6,0,1.45])
-agentview_image = ur5e_output_dict['agentview']['rgb']
-agentview_image = cv2.cvtColor(agentview_image,cv2.COLOR_BGR2RGB)
-cv2.imwrite('rgb.png',agentview_image)
-points = ur5e_output_dict['agentview']['points']
-points_np = np.array(points)
-points_np = points_np[:,:3]
-
-
-# Creating an Open3D PointCloud object from the numpy array
-pcd = o3d.geometry.PointCloud()
-pcd.points = o3d.utility.Vector3dVector(points_np)
-color = [1.0, 0.0, 0.0]
-pcd.colors = o3d.utility.Vector3dVector(np.tile(color, (points_np.shape[0], 1)))
-world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
-pcd.transform(np.linalg.inv(world_to_camera))
-# Convert Open3D point cloud to NumPy array
-pcd_points = np.asarray(pcd.points)
-print(pcd_points.shape)
-np.save('pointcloud.npy',pcd_points)
-robosuite_seg = ur5e_output_dict['agentview']['seg']
-robosuite_seg *= 255
-cv2.imwrite('seg.png',robosuite_seg)
-real_depth_map = ur5e_output_dict['agentview']['real_depth_map']
-np.save('depth.npy',real_depth_map)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/convert_depth_to_pointcloud.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/convert_depth_to_pointcloud.py
deleted file mode 100644
index 445a77d..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/convert_depth_to_pointcloud.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import open3d as o3d
-import numpy as np
-import cv2
-
-def normalize_depth_image(depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
-rgbd = np.load('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_1/rgbd.npy')
-seg = cv2.imread('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_1/seg.jpg',0)
-_, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
-intrinsic = np.array([[528.433756558705,0,320.5],[0,528.433756558705,240.5],[0,0,1]])
-color_image = rgbd[:,:,0:3]
-color_image = cv2.cvtColor(color_image,cv2.COLOR_BGR2RGB)
-depth_image = rgbd[:,:,-1]
-depth_image = depth_image[:,:,np.newaxis]
-rgbd_image = np.concatenate((color_image,depth_image),axis=2)
-inverted_mask = cv2.bitwise_not(seg)
-masked_rgbd_image = cv2.bitwise_and(rgbd_image,rgbd_image,mask=inverted_mask)
-cv2.imwrite('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/scripts/rgb_image.png',masked_rgbd_image[:,:,0:3])
-masked_rgb_image = masked_rgbd_image[:,:,0:3]
-masked_depth_image = masked_rgbd_image[:,:,-1]
-cv2.imwrite('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/scripts/depth_image.png',normalize_depth_image(masked_depth_image))
-
-gazebo_rgb = cv2.imread('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_1/gazebo_robot_only.jpg')
-gazebo_depth = np.load('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_1/gazebo_robot_depth.npy')
-joined_depth = np.concatenate((gazebo_depth[np.newaxis],masked_depth_image[np.newaxis]),axis=0)
-joined_depth[joined_depth == 0] = 1000
-joined_depth_argmin = np.argmin(joined_depth,axis=0)
-attempt = masked_rgb_image * joined_depth_argmin[:,:,np.newaxis]
-inverted_joined_depth_argmin = 1 - joined_depth_argmin
-attempt2 = gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
-inpainted_image = attempt + attempt2
-cv2.imwrite('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_1/pog.png',inpainted_image)
-# import pdb
-# pdb.set_trace()
-# rows, cols = masked_depth_image.shape
-# y, x = np.meshgrid(range(rows), range(cols), indexing='ij')
-# Z = masked_depth_image
-# X = (x - intrinsic[0][2]) * Z / intrinsic[0][0]
-# Y = (y - intrinsic[1][2]) * Z / intrinsic[1][1]
-# points = np.column_stack((X.flatten(),Y.flatten(),Z.flatten()))
-# pcd = o3d.geometry.PointCloud()
-# pcd.points = o3d.utility.Vector3dVector(points)
-# coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
-# o3d.visualization.draw_geometries([pcd,coordinate_frame])
-
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/convert_issac_to_gazebo.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/convert_issac_frame_to_gazebo_frame.py
similarity index 100%
rename from mirage/mirage/ros_ws/src/gazebo_env/scripts/convert_issac_to_gazebo.py
rename to mirage/mirage/ros_ws/src/gazebo_env/scripts/convert_issac_frame_to_gazebo_frame.py
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/dummy_joint_command_publisher.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/dummy_joint_command_publisher.py
new file mode 100644
index 0000000..eba88bd
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/dummy_joint_command_publisher.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+
+import rclpy
+from rclpy.node import Node
+from std_msgs.msg import Float64MultiArray
+from sensor_msgs.msg import Image
+import cv2
+from cv_bridge import CvBridge
+import numpy as np
+import time
+
+class JointCommandsPublisher(Node):
+ def __init__(self):
+ super().__init__('joint_commands_publisher')
+ self.publisher_ = self.create_publisher(Float64MultiArray, '/joint_commands', 10)
+ self.ur5_subscriber_ = self.create_subscription(Image,'/ur5_left_camera/depth/image_raw',self.ur5_image_callback,10)
+ self.panda_subscriber_ = self.create_subscription(Image,'/panda_left_camera/depth/image_raw',self.panda_image_callback,10)
+ self.ur5_gripper_filepath_ = '/home/lawrence/xembody/xembody/xembody/xembody/src/ros_ws/src/gazebo_env/test_data/2024-04-10-211323.jpg'
+ self.panda_filepath_ = '/home/lawrence/xembody/xembody/xembody/xembody/src/ros_ws/src/gazebo_env/test_data/2024-04-15-005029.jpg'
+ self.cv_bridge_ = CvBridge()
+ self.ur5_gt_image_ = cv2.imread(self.ur5_gripper_filepath_)
+ self.panda_gt_image_ = cv2.imread(self.panda_filepath_)
+ self.is_ready_ = False
+ self.timer = self.create_timer(1, self.publish_joint_commands)
+
+ def ur5_image_callback(self,msg):
+ if self.is_ready_:
+ self.get_logger().info('Received image message')
+ test_image = cv2.rotate(self.ur5_gt_image_, cv2.ROTATE_180)
+ height, width, _ = test_image.shape
+ cropped_image = test_image[:, :width // 2]
+ cropped_image = cv2.resize(cropped_image, (1280,720))
+ cv2.imwrite('output.jpg',cropped_image)
+ depth_image = self.cv_bridge_.imgmsg_to_cv2(msg)
+ real_seg_np = (depth_image < 8).astype(np.uint8)
+ real_seg_255_np = 255 * real_seg_np
+ translation_matrix = np.float32([[1, 0, 40], [0, 1, 30]])
+ translated_mask = cv2.warpAffine(real_seg_255_np, translation_matrix, (real_seg_255_np.shape[1], real_seg_255_np.shape[0]))
+ translated_mask[:30,579:real_seg_255_np.shape[1]] = 255
+ translated_mask[:30,76:368] = 255
+ cv2.imwrite('output2.jpg',translated_mask)
+
+ def panda_image_callback(self,msg):
+ if self.is_ready_:
+ self.get_logger().info('Received image message')
+ test_image = cv2.rotate(self.panda_gt_image_, cv2.ROTATE_180)
+ height, width, _ = test_image.shape
+ cropped_image = test_image[:, :width // 2]
+ cropped_image = cv2.resize(cropped_image, (1280,720))
+ cv2.imwrite('output3.jpg',cropped_image)
+ depth_image = self.cv_bridge_.imgmsg_to_cv2(msg)
+ real_seg_np = (depth_image < 8).astype(np.uint8)
+ real_seg_255_np = 255 * real_seg_np
+ translation_matrix = np.float32([[1, 0, 0], [0, 1, 30]])
+ translated_mask = cv2.warpAffine(real_seg_255_np, translation_matrix, (real_seg_255_np.shape[1], real_seg_255_np.shape[0]))
+ translated_mask[:30,:] = 255
+ cv2.imwrite('output4.jpg',translated_mask)
+ import pdb
+ pdb.set_trace()
+
+ def publish_joint_commands(self):
+ msg = Float64MultiArray()
+ # Example joint commands, you can modify this list according to your needs
+ joint_commands = [-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787,-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787,0.04,0.04,0.0,0.0,0.0,0.0,0.0,0.0]
+ msg.data = joint_commands
+ self.publisher_.publish(msg)
+ self.get_logger().info('Publishing joint commands: %s' % joint_commands)
+ time.sleep(1)
+ self.is_ready_ = True
+
+def main(args=None):
+ rclpy.init(args=args)
+ joint_commands_publisher = JointCommandsPublisher()
+ rclpy.spin(joint_commands_publisher)
+ joint_commands_publisher.destroy_node()
+ rclpy.shutdown()
+
+if __name__ == '__main__':
+ main()
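A usage note, assuming this script is installed as an executable of the `gazebo_env` package like the other scripts in this workspace: after sourcing the workspace's `install/setup.bash`, it can be run with

```
ros2 run gazebo_env dummy_joint_command_publisher.py
```

The two `test_data` image paths in the constructor are hard-coded; if they do not exist locally, `cv2.imread` returns `None` and the `cv2.rotate` calls in the image callbacks will fail.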
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/ee_pose.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/ee_pose.py
deleted file mode 100644
index 9a90a50..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/ee_pose.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-
-from std_msgs.msg import String, Float64MultiArray
-import pathlib
-import time
-from sensor_msgs.msg import Image
-import cv2
-from cv_bridge import CvBridge
-from tracikpy import TracIKSolver
-import numpy as np
-from ament_index_python.packages import get_package_share_directory
-
-
-class EEPose(Node):
-
- def __init__(self):
- super().__init__('ee_pose')
- self.publisher_ = self.create_publisher(String, 'ee_pose', 10)
- self.joint_publisher_ = self.create_publisher(Float64MultiArray,'/forward_position_controller/commands',10)
- self.subscriber_ = self.create_subscription(
- String,
- 'ee_pose',
- self.listener_callback,
- 10)
- #self.image_subscriber_ = self.create_subscription(Image,'/camera/image_raw',self.image_listener_callback,10)
- msg = String()
- x = 0.6
- y = -0.2
- z = 0.525
- msg.data = str(x) + ' ' + str(y) + ' ' + str(z)
- self.orientation_ = [[[1,0,0],[0,1,0],[0,0,1]],[[0,0,-1],[0,1,0],[1,0,0]],[[1,0,0],[0,0,1],[0,-1,0]],[[-1,0,0],[0,0,-1],[0,-1,0]]]
- self.i = 0
- self.length_ = 3
- self.done_ = False
- self.wrote_video_ = False
- self.bridge_ = CvBridge()
- self.frames_ = []
- self.first_time_bool_ = True
- self.first_time_ = None
- self.last_time_ = None
- self.num_joints_ = 6
- urdf_file_path = get_package_share_directory('gazebo_env') + "/urdf/ur5e_analytical.urdf"
- self.ik_solver_ = TracIKSolver(
- urdf_file_path,
- "base_link",
- "ft_frame",
- )
- self.publisher_.publish(msg)
-
- def image_listener_callback(self,msg):
- if(self.first_time_bool_):
- self.first_time_bool_ = False
- self.first_time_ = time.time()
- if(not self.done_):
- frame = self.bridge_.imgmsg_to_cv2(msg,desired_encoding='bgr8')
- self.frames_.append(frame)
- elif(self.done_ and not self.wrote_video_):
- self.last_time_ = time.time()
- print("ITS OVER")
- print(len(self.frames_))
- print(str(self.last_time_ - self.first_time_))
- result = cv2.VideoWriter('filename.avi',
- cv2.VideoWriter_fourcc(*'MJPG'),
- int(len(self.frames_)/(self.last_time_ - self.first_time_)), (640,480))
- for frame in self.frames_:
- result.write(frame)
- result.release()
- self.wrote_video_ = True
-
- def listener_callback(self, msg):
- if(self.first_time_bool_):
- self.first_time_bool_ = False
- time.sleep(1)
- print("IN HERE")
- xyz_str = msg.data
- xyz_arr = xyz_str.split(' ')
- x = float(xyz_arr[0])
- y = float(xyz_arr[1])
- z = float(xyz_arr[2])
- print([x,y,z])
- print(self.orientation_[self.i][0])
- print(self.orientation_[self.i][1])
- rotation = np.array(self.orientation_[self.i]).T
- translation = [x,y,z]
- base = [0,0,0,1]
- translation = np.array([translation])
- base = np.array([base])
- ee_pose = np.concatenate((np.concatenate((rotation,translation.T),axis=1),base),axis=0)
- print(ee_pose)
- self.i += 1
- start_configuration = np.zeros(self.num_joints_)
-
- configuration = self.ik_solver_.ik(ee_pose, qinit=start_configuration)
- print(configuration)
- configuration = [float(i) for i in configuration]
- joint_message = Float64MultiArray()
- joint_message.data = configuration
-
- time.sleep(2)
- self.joint_publisher_.publish(joint_message)
- print("PRIME STUFF")
- if(self.i >= self.length_):
- #time.sleep(1)
- self.done_ = True
-
-def main(args=None):
- rclpy.init(args=args)
-
- ee_pose = EEPose()
-
- rclpy.spin(ee_pose)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- ee_pose.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/find_rpy_for_cam.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/find_rpy_for_cam.py
index a8aad85..0fcd30d 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/find_rpy_for_cam.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/find_rpy_for_cam.py
@@ -4,9 +4,11 @@
camera_link_to_real_camera_link = np.array([[0,0,1],
[-1,0,0],
[0,-1,0]])
-world_to_real_camera_link = np.array([[-0.9065319 , 0.25194152, -0.33871137],
- [0.41907041, 0.44056495, -0.79390334],
- [-0.05079286, -0.86164261, -0.50496742]])
+
+world_to_real_camera_link = np.array([[-0.86235505,0.32847092,-0.38529289],
+ [0.50327672, -0.72315981, 0.35937403],
+ [-0.05528389, -0.81752946 , -0.57322707]])
+
real_camera_link_to_camera_link = np.linalg.inv(camera_link_to_real_camera_link)
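The hunk stops at the inverse above; the step this script leads up to, composing the two rotations and converting to roll/pitch/yaw, is what the new wrist variant below does for the 4x4 case. A self-contained sketch of that step using the updated matrix from this hunk (an assumed reconstruction, not shown in the original file):

```python
import numpy as np
from scipy.spatial.transform import Rotation

# Compose world->camera_link and read off roll/pitch/yaw for the camera
# mount; matrix values are copied from the hunk above.
camera_link_to_real_camera_link = np.array([[0, 0, 1],
                                            [-1, 0, 0],
                                            [0, -1, 0]])
world_to_real_camera_link = np.array([[-0.86235505,  0.32847092, -0.38529289],
                                      [ 0.50327672, -0.72315981,  0.35937403],
                                      [-0.05528389, -0.81752946, -0.57322707]])
real_camera_link_to_camera_link = np.linalg.inv(camera_link_to_real_camera_link)
world_to_camera_link = world_to_real_camera_link @ real_camera_link_to_camera_link
print(Rotation.from_matrix(world_to_camera_link).as_euler('xyz', degrees=False))
```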
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/find_rpy_for_cam_wrist.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/find_rpy_for_cam_wrist.py
new file mode 100644
index 0000000..ac1cc28
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/find_rpy_for_cam_wrist.py
@@ -0,0 +1,25 @@
+import numpy as np
+from scipy.spatial.transform import Rotation
+
+camera_link_to_real_camera_link = np.array([[0,0,1,0],
+ [-1,0,0,0],
+ [0,-1,0,0],
+ [0,0,0,1]])
+
+real_camera_link_to_camera_link = np.linalg.inv(camera_link_to_real_camera_link)
+
+ee_to_real_camera_link = np.array([[0.72579549, -0.39985199, 0.55976718, -0.07525412],
+ [0.68784404, 0.43314629, -0.58245589, 0.03308232],
+ [-0.00956493, 0.80777638, 0.58941143, 0.01456681],
+ [0,0,0,1]])
+
+real_camera_link_to_ee = np.linalg.inv(ee_to_real_camera_link)
+
+
+ee_to_camera_link = ee_to_real_camera_link @ real_camera_link_to_camera_link
+
+print(ee_to_camera_link)
+ee_to_camera_link_rotation = ee_to_camera_link[:3,:3]
+rpy_angles = Rotation.from_matrix(ee_to_camera_link_rotation).as_euler('xyz', degrees=False)
+
+print(rpy_angles)
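A note on the `'xyz'` convention used here: in SciPy, lowercase axis strings mean extrinsic (fixed-axis) rotations, which is exactly URDF's `rpy` convention, so the printed angles can go straight into an `<origin rpy="...">` tag. A quick round-trip check of that equivalence (illustrative values):

```python
import numpy as np
from scipy.spatial.transform import Rotation

# Lowercase 'xyz' = extrinsic roll/pitch/yaw, matching URDF's rpy attribute.
R = Rotation.from_euler('xyz', [0.1, -0.2, 0.3])
rpy = R.as_euler('xyz')
assert np.allclose(Rotation.from_euler('xyz', rpy).as_matrix(), R.as_matrix())
```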
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/fk_tests.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/fk_tests.py
deleted file mode 100644
index c01a0ff..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/fk_tests.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import numpy as np
-from tracikpy import TracIKSolver
-
-ur5_solver = TracIKSolver("/home/lawrence/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5_ik_real.urdf","base_link","wrist_3_link")
-panda_solver = TracIKSolver("/home/lawrence/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf","panda_link0","panda_link8")
-ee_pose = panda_solver.fk(np.array([0.05445501208305359, 0.29904165863990784, -0.05337003618478775, -2.1572153568267822, 0.010299555025994778, 2.438530683517456,0.003930782433599234]))
-print(ee_pose)
-qout = panda_solver.ik(ee_pose,qinit=np.zeros(panda_solver.number_of_joints))
-print(qout)
-# panda_fk_solver = TracIKSolver("/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik.urdf","world","panda_hand")
-# ee_pose = panda_fk_solver.fk(np.zeros(panda_fk_solver.number_of_joints))
-# ur5e_ik_solver = TracIKSolver("/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik.urdf","world","tool0")
-# qout = ur5e_ik_solver.ik(ee_pose,qinit=np.zeros(ur5e_ik_solver.number_of_joints))
-# print(qout)
-
-#print(ur5e_ik_solver.fk(np.array([0,0,0,0,0,0])))
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/franka_to_franka_hdf5_conversion.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/franka_to_franka_hdf5_conversion.py
deleted file mode 100644
index 1f86d2d..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/franka_to_franka_hdf5_conversion.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import h5py
-import cv2
-import numpy as np
-input = '/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_to_franka/image_v141_copy.hdf5'
-output = '/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_to_franka/image_v141_inpaint.hdf5'
-
-with h5py.File(input, 'r+') as file:
- for i in range(len(file['data'].keys())):
- demo_str = 'demo_'+str(i)
- obs = file['data'][demo_str]['obs']
- obs_agentview_image_inpainted = None
- next_obs_agentview_image_inpainted = None
- for j in range(obs['agentview_image'].shape[0]):
- inpaint_file_path = '/home/benchturtle/cross_embodiment_ws/franka_to_franka/demo_' + str(i) + '/inpaint' + str(j) + '.png'
- next_inpaint_file_path = '/home/benchturtle/cross_embodiment_ws/franka_to_franka/demo_' + str(i) + '/inpaint' + str(j+1) + '.png'
- if(j+1 == obs['agentview_image'].shape[0]):
- next_inpaint_file_path = inpaint_file_path
- inpaint_obs_image = cv2.imread(inpaint_file_path)
- inpaint_next_obs_image = cv2.imread(next_inpaint_file_path)
- inpaint_obs_image = inpaint_obs_image[np.newaxis,:]
- inpaint_next_obs_image = inpaint_next_obs_image[np.newaxis,:]
- if(obs_agentview_image_inpainted is None):
- obs_agentview_image_inpainted = inpaint_obs_image
- next_obs_agentview_image_inpainted = inpaint_next_obs_image
- else:
- obs_agentview_image_inpainted = np.concatenate((obs_agentview_image_inpainted,inpaint_obs_image))
- next_obs_agentview_image_inpainted = np.concatenate((next_obs_agentview_image_inpainted,inpaint_next_obs_image))
- file['data'][demo_str]['obs']['agentview_image_inpainted'] = obs_agentview_image_inpainted
- file['data'][demo_str]['next_obs']['agentview_image_inpainted'] = next_obs_agentview_image_inpainted
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/full_ur5e_joint_state_publisher_node.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/full_ur5e_joint_state_publisher_node.py
deleted file mode 100644
index 68ae502..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/full_ur5e_joint_state_publisher_node.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import JointState
-from std_msgs.msg import Float64MultiArray
-
-class FullUR5EJointStatePublisher(Node):
- def __init__(self):
- super().__init__('full_ur5e_joint_state_publisher')
- self.subscription = self.create_subscription(
- Float64MultiArray,
- '/joint_commands', # Topic name for /joint_commands
- self.joint_commands_callback,
- 10)
- self.publisher = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_msg = JointState()
-
- def joint_commands_callback(self, msg):
- # Fill the JointState message with data from the Float64MultiArray
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- gripper_val = msg.data[6]
- msg.data.append(gripper_val)
- msg.data.append(gripper_val)
- msg.data.append(gripper_val)
- msg.data.append(-gripper_val)
- msg.data.append(-gripper_val)
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.publisher.publish(self.joint_state_msg)
-
-def main(args=None):
- rclpy.init(args=args)
- full_ur5e_joint_state_publisher = FullUR5EJointStatePublisher()
- rclpy.spin(full_ur5e_joint_state_publisher)
- full_ur5e_joint_state_publisher.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/gazebo_image_test.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/gazebo_image_test.py
deleted file mode 100644
index 1bc34cf..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/gazebo_image_test.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import Image
-from cv_bridge import CvBridge
-import cv2
-
-class ImageSubscriberNode(Node):
- def __init__(self):
- super().__init__('image_subscriber_node')
- self.subscription = self.create_subscription(
- Image,
- '/camera/image_raw', # Replace with your image topic name
- self.image_callback,
- 10 # Adjust the QoS profile as needed
- )
- self.subscription # Prevent unused variable warning
- self.bridge = CvBridge()
-
- def image_callback(self, msg):
- try:
- # Convert the ROS Image message to a OpenCV image
- cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
- except Exception as e:
- self.get_logger().error(f"Error converting image message: {str(e)}")
- return
-
- # Save the received image using cv2.imwrite
- save_path = '/home/benchturtle/gazebo_img.jpg' # Specify your save path
- cv2.imwrite(save_path, cv_image)
- self.get_logger().info(f"Image saved to {save_path}")
-
-def main(args=None):
- rclpy.init(args=args)
- image_subscriber_node = ImageSubscriberNode()
- rclpy.spin(image_subscriber_node)
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/gripper_client.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/gripper_client.py
deleted file mode 100644
index 1d0b7eb..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/gripper_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import rclpy
-from rclpy.action import ActionClient
-from rclpy.node import Node
-
-from control_msgs.action import GripperCommand
-
-#from action_tutorials_interfaces.action import Fibonacci
-
-
-class GripperClient(Node):
-
- def __init__(self):
- super().__init__('gripper_action_client')
- self._action_client = ActionClient(self, GripperCommand, '/robotiq_gripper_controller/gripper_cmd')
-
- def send_goal(self):
- goal_msg = GripperCommand.Goal()
- goal_msg.command.position = 0.4
- goal_msg.command.max_effort = 10.0
- print(goal_msg.command)
- print("I'm HIM")
- # goal_msg.order = order
-
- self._action_client.wait_for_server()
- print("WE HERE")
-
- return self._action_client.send_goal_async(goal_msg)
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- action_client = GripperClient()
-
- future = action_client.send_goal()
-
- rclpy.spin_until_future_complete(action_client, future)
-
- print("COOK")
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/gripper_joint_state_publisher_node.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/gripper_joint_state_publisher_node.py
deleted file mode 100644
index 907d542..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/gripper_joint_state_publisher_node.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import JointState
-from std_msgs.msg import Float64MultiArray
-
-class FullUR5EJointStatePublisher(Node):
- def __init__(self):
- super().__init__('full_ur5e_joint_state_publisher')
- self.subscription = self.create_subscription(
- Float64MultiArray,
- '/joint_commands', # Topic name for /joint_commands
- self.joint_commands_callback,
- 10)
- self.publisher = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 10)
- self.joint_state_msg = JointState()
-
- def joint_commands_callback(self, msg):
- # Fill the JointState message with data from the Float64MultiArray
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- gripper_val = msg.data[0]
- msg.data.append(gripper_val)
- msg.data.append(gripper_val)
- msg.data.append(gripper_val)
- msg.data.append(-gripper_val)
- msg.data.append(-gripper_val)
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.publisher.publish(self.joint_state_msg)
-
-def main(args=None):
- rclpy.init(args=args)
- full_ur5e_joint_state_publisher = FullUR5EJointStatePublisher()
- rclpy.spin(full_ur5e_joint_state_publisher)
- full_ur5e_joint_state_publisher.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_joint_command_publisher.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_joint_command_publisher.py
deleted file mode 100644
index 4eae4cd..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_joint_command_publisher.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-import rclpy
-from rclpy.node import Node
-from std_msgs.msg import Float64MultiArray
-import numpy as np
-
-class JointCommandsPublisher(Node):
- def __init__(self):
- super().__init__('joint_commands_publisher')
- self.publisher_ = self.create_publisher(Float64MultiArray, 'joint_commands', 10)
- self.timer = self.create_timer(0.1, self.publish_joint_commands)
-
- def publish_joint_commands(self):
- # Creating a Float64MultiArray message
- msg = Float64MultiArray()
-
- # Example joint commands
- joint_commands = [-0.12272371, 0.74588609, 0.29257262, -1.77508295, -0.2950204 ,
- 2.45879841, 1.37170529,0.020833] # Replace this with your joint commands
-
- # Assigning the joint commands to the Float64MultiArray message
- msg.data = joint_commands
-
- # Publishing the message
- self.publisher_.publish(msg)
- self.get_logger().info('Publishing joint commands: %s' % str(msg.data))
-
-def main(args=None):
- rclpy.init(args=args)
- joint_commands_publisher = JointCommandsPublisher()
- rclpy.spin(joint_commands_publisher)
- joint_commands_publisher.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_no_gripper_joint_command_publisher.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_no_gripper_joint_command_publisher.py
deleted file mode 100644
index d22ce75..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_no_gripper_joint_command_publisher.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-import rclpy
-from rclpy.node import Node
-from std_msgs.msg import Float64MultiArray
-import numpy as np
-
-class JointCommandsPublisher(Node):
- def __init__(self):
- super().__init__('joint_commands_publisher')
- self.publisher_ = self.create_publisher(Float64MultiArray, 'joint_commands', 10)
- self.timer = self.create_timer(0.1, self.publish_joint_commands)
-
- def publish_joint_commands(self):
- # Creating a Float64MultiArray message
- msg = Float64MultiArray()
-
- # Example joint commands
- joint_commands = [-0.12272371, 0.74588609, 0.29257262, -1.77508295, -0.2950204 ,
- 2.45879841, 1.37170529] # Replace this with your joint commands
-
- # Assigning the joint commands to the Float64MultiArray message
- msg.data = joint_commands
-
- # Publishing the message
- self.publisher_.publish(msg)
- self.get_logger().info('Publishing joint commands: %s' % str(msg.data))
-
-def main(args=None):
- rclpy.init(args=args)
- joint_commands_publisher = JointCommandsPublisher()
- rclpy.spin(joint_commands_publisher)
- joint_commands_publisher.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
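
Both command publishers above stream a `Float64MultiArray` on `joint_commands`: seven arm joint values, with or without a trailing gripper value. A minimal subscriber sketch for inspecting that stream, assuming a sourced ROS 2 environment:

```
#!/usr/bin/env python3
# Echo the joint commands published by the nodes above (sketch).
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64MultiArray

class JointCommandsEcho(Node):
    def __init__(self):
        super().__init__('joint_commands_echo')
        self.subscription_ = self.create_subscription(
            Float64MultiArray, 'joint_commands', self.callback, 10)

    def callback(self, msg):
        # Seven arm values, plus an optional trailing gripper value
        self.get_logger().info('Received %d values: %s' % (len(msg.data), list(msg.data)))

def main(args=None):
    rclpy.init(args=args)
    node = JointCommandsEcho()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()

if __name__ == '__main__':
    main()
```
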
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_point_cloud_publisher.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_point_cloud_publisher.py
deleted file mode 100644
index e6c17e9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_point_cloud_publisher.py
+++ /dev/null
@@ -1,659 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-
-class PointCloudPublisher(Node):
- def __init__(self):
- super().__init__('point_cloud_publisher')
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 10
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",10)
- timer_period = 0.5
- self.links_info_ = []
-
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
-
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",10)
- self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",10)
- setup_mesh_timer = self.create_timer(timer_period,self.setupMeshes)
- #exit()
- #self.sync_ = TimeSynchronizer(self.subscribers_,10)
- #self.sync_.registerCallback(self.pointcloud_callback)
-
- def cameraCallback(self,msg):
- cv2_image = self.cv_bridge_.imgmsg_to_cv2(msg)
- cv2.imwrite('gazebo_image.png',cv2_image)
-
- def fullPointcloudCallback(self,msg):
- all_pixels = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_, dtype=np.uint8)
- white_color = (255,255,255)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(y),round(x)] = white_color
- cv2.imwrite('/home/benchturtle/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
-
-
- def setupMeshes(self):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- # import pdb
- # pdb.set_trace()
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "camera_color_optical_frame"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- print("I am the Senate",flush=True)
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- # RPY is in ZYX I'm pretty sure
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- # import pdb
- # pdb.set_trace()
- # open3d_mesh.rotate(transform_matrix[:3, :3],[0,0,0])
- # open3d_mesh.translate(transform_matrix[:3, 3])
- # test_pcd2 = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- # mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- # o3d.visualization.draw_geometries([test_pcd,test_pcd2,mesh_coordinate_frame])
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- #pass
- print("Error finding transform between camera frame and " + str(link_name))
-
- def cameraInfoCallback(self,msg):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def pointcloud_callback(self,msg1,msg2,msg3,msg4,msg5,msg6,msg7):
- if(self.camera_intrinsic_matrix_ is None):
- return
- else:
- msg1_pixels = self.getPixels(msg1)
- msg2_pixels = self.getPixels(msg2)
- msg3_pixels = self.getPixels(msg3)
- msg4_pixels = self.getPixels(msg4)
- msg5_pixels = self.getPixels(msg5)
- msg6_pixels = self.getPixels(msg6)
- msg7_pixels = self.getPixels(msg7)
- all_pixels = np.vstack((msg1_pixels,msg2_pixels,msg3_pixels,msg4_pixels,msg5_pixels,msg6_pixels,msg7_pixels))
- mask_image = np.zeros(self.image_shape_, dtype=np.uint8)
- white_color = (255,255,255)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(y),round(x)] = white_color
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(mask_image,encoding="bgr8")
- self.mask_image_publisher_.publish(ros_mask_image)
-
- def getPixels(self,msg):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- msg_data_homogenous = np.hstack((msg_data,np.ones((msg_data.shape[0],1))))
- pixel_data_homogenous = np.dot(self.camera_intrinsic_matrix_,msg_data_homogenous.T).T
- pixel_data = pixel_data_homogenous[:,:2] / pixel_data_homogenous[:,2:]
- return pixel_data
-
- def createTransform(self,rpy_str,xyz_str):
- rpy_array = rpy_str.split(' ')
- xyz_array = xyz_str.split(' ')
- roll = float(rpy_array[0])
- pitch = float(rpy_array[1])
- yaw = float(rpy_array[2])
- x = float(xyz_array[0])
- y = float(xyz_array[1])
- z = float(xyz_array[2])
-
- rotation_x = np.array([[1, 0, 0],
- [0, math.cos(roll), -math.sin(roll)],
- [0, math.sin(roll), math.cos(roll)]])
-
- rotation_y = np.array([[math.cos(pitch), 0, math.sin(pitch)],
- [0, 1, 0],
- [-math.sin(pitch), 0, math.cos(pitch)]])
-
- rotation_z = np.array([[math.cos(yaw), -math.sin(yaw), 0],
- [math.sin(yaw), math.cos(yaw), 0],
- [0, 0, 1]])
-
- rotation_matrix = np.dot(rotation_z, np.dot(rotation_y, rotation_x))
- translation_vector = np.array([[x],[y],[z]])
- transform_matrix = np.concatenate((rotation_matrix,translation_vector),axis=1)
- transform_matrix = np.concatenate((transform_matrix,np.array([[0,0,0,1]])),axis=0)
- return transform_matrix
-
- def eulerToR(self,rpy_np):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def transformStampedToMatrix(self,rotation,translation):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def debugTimerCallback(self,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str):
- mesh_scene = trimesh.load(filename)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # if(link_name == "base_link_inertia"):
- # pcd_data = pcd_data[:,[0,2,1]]
- # pcd_data[:,1] *= -1
- # elif(link_name == "shoulder_link"):
- # # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # # open3d_mesh.rotate(R,[0,0,0])
- # # # print("RPY")
- # # # print(rpy_str)
- # # # print("XYZ Str")
- # # # print(xyz_str)
- # # rpy_str_list = rpy_str.split()
- # # rpy_floats = [float(x) for x in rpy_str_list]
- # # rpy_np = np.array(rpy_floats)
- # # # print("RPY NUMPY")
- # # # print(rpy_np)
- # # R2 = pcd.get_rotation_matrix_from_xyz(rpy_np)
- # # # print("We have R2 with us")
- # # # print(R2)
- # # open3d_mesh.rotate(R2,[0,0,0])
- # # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # # pcd_data = np.asarray(pcd.points)
- # # #pcd_data = pcd_data[:,[0,2,1]]
- # # #pcd_data[:,0] *= -1
-
- # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # open3d_mesh.rotate(R,[0,0,0])
- # # print("RPY")
- # # print(rpy_str)
- # # print("XYZ Str")
- # # print(xyz_str)
- # rpy_str_list = rpy_str.split()
- # rpy_floats = [float(x) for x in rpy_str_list]
- # rpy_np = np.array(rpy_floats)
- # xyz_str_list = xyz_str.split()
- # xyz_floats = [float(x) for x in xyz_str_list]
- # xyz_np = 1000 * np.array(xyz_floats)
- # print("XYZ NUMPY")
- # print(xyz_np)
- # R21 = pcd.get_rotation_matrix_from_xyz(rpy_np)
- # print("We have R21 with us")
- # print(R21)
- # R22 = pcd.get_rotation_matrix_from_xzy(rpy_np)
- # print("We have R22 with us")
- # print(R22)
- # R23 = pcd.get_rotation_matrix_from_yxz(rpy_np)
- # print("We have R23 with us")
- # print(R23)
- # R24 = pcd.get_rotation_matrix_from_yzx(rpy_np)
- # print("We have R24 with us")
- # print(R24)
- # R25 = pcd.get_rotation_matrix_from_zxy(rpy_np)
- # print("We have R25 with us")
- # print(R25)
- # R26 = pcd.get_rotation_matrix_from_zyx(rpy_np)
- # print("We have R26 with us")
- # print(R26)
- # R2 = self.eulerToR(rpy_np)
- # print("We have R2 with us")
- # print(R2)
- # open3d_mesh.rotate(R2,[0,0,0])
- # open3d_mesh.translate(xyz_np)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # elif(link_name == "upper_arm_link"):
- # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # open3d_mesh.rotate(R,[0,0,0])
- # # print("RPY")
- # # print(rpy_str)
- # # print("XYZ Str")
- # # print(xyz_str)
- # rpy_str_list = rpy_str.split()
- # rpy_floats = [float(x) for x in rpy_str_list]
- # rpy_np = np.array(rpy_floats)
- # xyz_str_list = xyz_str.split()
- # xyz_floats = [float(x) for x in xyz_str_list]
- # xyz_np = 1000 * np.array(xyz_floats)
- # print("XYZ NUMPY")
- # print(xyz_np)
- # R21 = pcd.get_rotation_matrix_from_xyz(rpy_np)
- # print("We have R21 with us")
- # print(R21)
- # R22 = pcd.get_rotation_matrix_from_xzy(rpy_np)
- # print("We have R22 with us")
- # print(R22)
- # R23 = pcd.get_rotation_matrix_from_yxz(rpy_np)
- # print("We have R23 with us")
- # print(R23)
- # R24 = pcd.get_rotation_matrix_from_yzx(rpy_np)
- # print("We have R24 with us")
- # print(R24)
- # R25 = pcd.get_rotation_matrix_from_zxy(rpy_np)
- # print("We have R25 with us")
- # print(R25)
- # R26 = pcd.get_rotation_matrix_from_zyx(rpy_np)
- # print("We have R26 with us")
- # print(R26)
- # R2 = self.eulerToR(rpy_np)
- # print("We have R2 with us")
- # print(R2)
- # open3d_mesh.rotate(R2,[0,0,0])
- # open3d_mesh.translate(xyz_np)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # elif(link_name == "forearm_link"):
- # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # open3d_mesh.rotate(R,[0,0,0])
- # rpy_str_list = rpy_str.split()
- # rpy_floats = [float(x) for x in rpy_str_list]
- # rpy_np = np.array(rpy_floats)
- # xyz_str_list = xyz_str.split()
- # xyz_floats = [float(x) for x in xyz_str_list]
- # xyz_np = 1000 * np.array(xyz_floats)
- # print("XYZ NUMPY")
- # print(xyz_np)
- # R2 = self.eulerToR(rpy_np)
- # print("We have R2 with us")
- # print(R2)
- # open3d_mesh.rotate(R2,[0,0,0])
- # open3d_mesh.translate(xyz_np)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # #pcd_data = pcd_data[:,[1,0,2]]
- # #pcd_data[:,0] *= -1
-
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- open3d_mesh.rotate(R2,[0,0,0])
- open3d_mesh.translate(xyz_np)
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time()
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- except TransformException as ex:
- return
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=100)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
-
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "camera_color_optical_frame"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- publisher.publish(point_cloud_msg)
-
- def timerCallback(self,filename,link_name,publisher,rpy_str,xyz_str):
-
- mesh = trimesh.load(filename)
- transform_matrix = self.createTransform(rpy_str,xyz_str)
- o3d_mesh = None
- pcds = []
- points = None
- R = np.array([[1,0,0],[0,0,-1],[0,1,0]])
- R2 = np.array([[-1,0,0],[0,-1,0],[0,0,1]])
- for item in mesh.geometry:
- o3d_mesh = mesh.geometry[item].as_open3d
- #o3d_mesh = o3d_mesh.rotate(R)
- pcd = o3d_mesh.sample_points_uniformly(number_of_points=100000)
- if points is None:
- points = np.asarray(pcd.points)
- else:
- points = np.concatenate((points,pcd.points),axis=0)
- pcds.append(pcd)
- new_pcd = o3d.geometry.PointCloud()
- new_pcd.points = o3d.utility.Vector3dVector(points)
- center_translation = -new_pcd.get_center()
- new_pcd = new_pcd.translate(center_translation)
- new_pcd = new_pcd.rotate(R)
- new_pcd = new_pcd.rotate(R2)
-
- new_pcd = new_pcd.transform(transform_matrix)
- #print(transform_matrix)
- #mesh = mesh.apply_transform(transform_matrix)
- # vertices = None
- # for item in mesh.geometry:
- # if vertices is None:
- # vertices = mesh.geometry[item].vertices
- # else:
- # vertices = np.concatenate((vertices,mesh.geometry[item].vertices),axis=0)
-
- # # Create a visualization window
- # pcd = o3d.geometry.PointCloud()
- # pcd.points = o3d.utility.Vector3dVector(vertices)
- # R = np.array([[1,0,0],[0,0,-1],[0,1,0]])
- # R2 = np.array([[-1,0,0],[0,-1,0],[0,0,1]])
- # pcd = pcd.rotate(R)
- # pcd = pcd.rotate(R2)
- #pcd = pcd.transform(transform_matrix)
- # Pointcloud still rotated incorrectly
- #additional_transform = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
- #pcd = pcd.transform(additional_transform)
- pcd_data = np.asarray(new_pcd.points) / 1000
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = link_name
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
-
- publisher.publish(point_cloud_msg)
- # self.dae_paths_ = glob.glob(self.dae_main_path_ + '/*.dae')
- # self.publishers_ = []
- # self.timers_ = []
- # timer_period = 0.5
- # for dae_path in self.dae_paths_:
- # ur5_part = dae_path[dae_path.rfind('/')+1:dae_path.rind('.')]
- # publisher = self.create_publisher(PointCloud2,ur5_part+'_pointcloud',10)
- # self.publishers_.append(publisher)
- # timer = self.create_timer(timer_period,partial(self.timerCallback,param1=dae_path))
-
- # self.ur5_parts_ = ['base','forearm','shoulder','upperarm','wrist1','wrist2','wrist3']
- # self.publishers_ = []
- # self.timers_ = []
- # timer_period = 0.5
- # for ur5_part in self.ur5_parts_:
- # publisher = self.create_publisher(PointCloud2,ur5_part+'_pointcloud',10)
- # self.publishers_.append(publisher)
- # timer = self.create_timer(timer_period,partial(self.timerCallback,param1=ur5_part))
- # self.publisher_ = self.create_publisher(PointCloud2, '/base_pointcloud', 10)
- # timer_period = 0.5 # seconds
- # self.timer = self.create_timer(timer_period, self.timer_callback)
- # self.i = 0
-
- # print(glob.glob(self.dae_main_path_ + '/*.dae'))
- # exit()
- # self.base_filepath_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/meshes/ur5e/visual/base.dae"
-
- # def timer_callback(self):
- # mesh = trimesh.load(self.base_filepath_)
- # vertices = None
- # for item in mesh.geometry:
- # if vertices is None:
- # vertices = mesh.geometry[item].vertices
- # else:
- # vertices = np.concatenate((vertices,mesh.geometry[item].vertices),axis=0)
-
- # # Create a visualization window
- # pcd = o3d.geometry.PointCloud()
- # pcd.points = o3d.utility.Vector3dVector(vertices)
-
- # pcd_data = np.asarray(pcd.points) / 1000
- # np.savetxt('data.csv',pcd_data,delimiter=", ")
- # point_cloud_msg = PointCloud2()
- # point_cloud_msg.header = Header()
- # point_cloud_msg.header.frame_id = 'base_link_inertia'
- # fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- # PointField(name='z', offset=4, datatype=PointField.FLOAT32, count=1),
- # PointField(name='y', offset=8, datatype=PointField.FLOAT32, count=1),
- # ]
- # point_cloud_msg.height = 1
- # point_cloud_msg.width = len(pcd_data)
- # point_cloud_msg.fields = fields
- # point_cloud_msg.is_bigendian = False
- # point_cloud_msg.point_step = 3 * 4
- # point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- # point_cloud_msg.is_dense = True
- # point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
-
- # self.publisher_.publish(point_cloud_msg)
- # self.i += 1
-
-def main(args=None):
- rclpy.init(args=args)
-
- point_cloud_publisher = PointCloudPublisher()
-
- rclpy.spin(point_cloud_publisher)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- point_cloud_publisher.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
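
Several nodes in this workspace hand-pack `PointCloud2` messages with the same layout: three float32 fields and 12-byte points. A reusable sketch of that packing, assuming an Nx3 float array already expressed in the camera optical frame:

```
# Pack an Nx3 numpy array into an unorganized PointCloud2, mirroring the
# field layout used by the deleted publisher above (sketch).
import numpy as np
from std_msgs.msg import Header
from sensor_msgs.msg import PointCloud2, PointField

def xyz_to_pointcloud2(points, frame_id):
    msg = PointCloud2()
    msg.header = Header()
    msg.header.frame_id = frame_id
    msg.fields = [
        PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
        PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
        PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
    ]
    msg.height = 1                      # unorganized cloud
    msg.width = len(points)
    msg.is_bigendian = False
    msg.point_step = 3 * 4              # three float32 values per point
    msg.row_step = msg.point_step * len(points)
    msg.is_dense = True
    msg.data = bytearray(points.astype('float32').tobytes())
    return msg
```
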
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_ur5_gripper_real_joint_command_publisher.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_ur5_gripper_real_joint_command_publisher.py
deleted file mode 100644
index 1b79146..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/panda_ur5_gripper_real_joint_command_publisher.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-import rclpy
-from rclpy.node import Node
-from std_msgs.msg import Float64MultiArray
-import numpy as np
-
-class JointCommandsPublisher(Node):
- def __init__(self):
- super().__init__('joint_commands_publisher')
- self.publisher_ = self.create_publisher(Float64MultiArray, 'joint_commands', 10)
- self.timer = self.create_timer(0.1, self.publish_joint_commands)
-
- def publish_joint_commands(self):
- # Creating a Float64MultiArray message
- msg = Float64MultiArray()
-
- # Example joint commands
- joint_commands = [0.05661974, 0.18127457, -0.05910101, -2.20165285 , 0.0197648 , 2.38321172,
- -0.01350597+1.57,0.0]
- # Assigning the joint commands to the Float64MultiArray message
- msg.data = joint_commands
-
- # Publishing the message
- self.publisher_.publish(msg)
- self.get_logger().info('Publishing joint commands: %s' % str(msg.data))
-
-def main(args=None):
- rclpy.init(args=args)
- joint_commands_publisher = JointCommandsPublisher()
- rclpy.spin(joint_commands_publisher)
- joint_commands_publisher.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node.py
deleted file mode 100644
index 60f9446..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFiles
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.franka_info_folder_ = '/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_1/'
- timer_period = 2
- self.timer_ = self.create_timer(timer_period, self.timerCallback)
- self.depth_file_ = "rgbd.npy"
- self.segmentation_ = "seg.jpg"
- self.joints_ = "joints.txt"
- self.publisher_ = self.create_publisher(InputFiles,'/input_files',1)
- self.is_ready_ = True
-
- def timerCallback(self):
- if self.is_ready_:
- depth_file = self.franka_info_folder_ + self.depth_file_
- segmentation = self.franka_info_folder_ + self.segmentation_
- joints = self.franka_info_folder_ + self.joints_
- input_file_msg = InputFiles()
- input_file_msg.depth_file = depth_file
- input_file_msg.segmentation = segmentation
- input_file_msg.joints = joints
- self.publisher_.publish(input_file_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_input_files_real_data_node.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_input_files_real_data_node.py
new file mode 100644
index 0000000..8a1bc8b
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_input_files_real_data_node.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+import rclpy
+from rclpy.node import Node
+
+from input_filenames_msg.msg import InputFilesRealData, InputFilesRealDataMulti
+import cv2
+from cv_bridge import CvBridge
+
+class MinimalPublisher(Node):
+
+ def __init__(self):
+ super().__init__('minimal_publisher')
+ self.publisher_ = self.create_publisher(InputFilesRealDataMulti, 'input_files_data_real', 10)
+ self.ur5_gripper_filepath_ = '/home/lawrence/xembody/xembody/xembody/xembody/src/ros_ws/src/gazebo_env/test_data/2024-04-10-211323.jpg'
+ self.cv_bridge_ = CvBridge()
+ self.ur5_gt_image_ = cv2.imread(self.ur5_gripper_filepath_)
+ test_image = cv2.rotate(self.ur5_gt_image_, cv2.ROTATE_180)
+ height, width, _ = test_image.shape
+ cropped_left_image = test_image[:, :width // 2]
+ cropped_left_image = cv2.resize(cropped_left_image, (1280,720))
+ self.ros_ur5_left_image_ = self.cv_bridge_.cv2_to_imgmsg(cropped_left_image)
+ cropped_right_image = test_image[:, width // 2:]
+ cropped_right_image = cv2.resize(cropped_right_image, (1280,720))
+ self.ros_ur5_right_image_ = self.cv_bridge_.cv2_to_imgmsg(cropped_right_image)
+        # Seven arm joint angles plus a trailing gripper value
+        self.joints_ = [-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787, 0.0]
+        # Publish a single message at startup; no timer is created despite the method name
+        self.timer_callback()
+
+ def timer_callback(self):
+ msg = InputFilesRealDataMulti()
+ msg_left_data = InputFilesRealData()
+ msg_left_data.joints = self.joints_
+ msg_left_data.rgb = self.ros_ur5_left_image_
+ msg.data_pieces.append(msg_left_data)
+ msg_right_data = InputFilesRealData()
+ msg_right_data.joints = self.joints_
+ msg_right_data.rgb = self.ros_ur5_right_image_
+ msg.data_pieces.append(msg_right_data)
+ self.publisher_.publish(msg)
+ print("Published")
+
+
+def main(args=None):
+ rclpy.init(args=args)
+
+ minimal_publisher = MinimalPublisher()
+
+ rclpy.spin(minimal_publisher)
+
+ # Destroy the node explicitly
+ # (optional - otherwise it will be done automatically
+ # when the garbage collector destroys the node object)
+ minimal_publisher.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
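
The new publisher above expects a single side-by-side stereo frame: it rotates the image 180 degrees (the capture is apparently upside down), splits it into halves, and resizes each half to 1280x720. The same preprocessing as a standalone sketch (the path is a placeholder):

```
# Split a side-by-side stereo frame into left/right ROS images (sketch).
import cv2
from cv_bridge import CvBridge

def split_stereo_frame(path, size=(1280, 720)):
    bridge = CvBridge()
    frame = cv2.rotate(cv2.imread(path), cv2.ROTATE_180)  # undo upside-down capture
    width = frame.shape[1]
    left = cv2.resize(frame[:, :width // 2], size)        # left half of the frame
    right = cv2.resize(frame[:, width // 2:], size)       # right half of the frame
    return bridge.cv2_to_imgmsg(left), bridge.cv2_to_imgmsg(right)
```
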
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_franka_check2.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_franka_check2.py
deleted file mode 100644
index 87e4706..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_franka_check2.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Bool
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRealData, InputFilesRealDataMulti
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.cv_bridge_ = CvBridge()
- self.franka_info_folder_ = '/home/lawrence/cross_embodiment_ws/src/gazebo_env/franka_check_2'
- self.check_npy_file_ = "/home/lawrence/cross_embodiment_ws/src/gazebo_env/franka_check_2/check.npy"
- self.check_npy_ = np.load(self.check_npy_file_,allow_pickle=True).tolist()
- self.rgb_np_ = self.check_npy_['img_0']['img_left']
- self.depth_np_ = self.check_npy_['img_0']['depth']
-
- cv2.imwrite('fake_depth.png',self.normalize_depth_image(self.depth_np_))
- self.joints_np_ = self.check_npy_['img_0']['joint_angles']
- self.rgb_ = self.cv_bridge_.cv2_to_imgmsg(self.rgb_np_,'bgr8')
- self.depth_ = self.depth_np_.astype('float64').flatten().tolist()
- self.joints_ = self.joints_np_.astype('float64').tolist()
- self.joints_.append(0.0)
- self.publisher_ = self.create_publisher(InputFilesRealDataMulti,'/input_files_data_real',1)
- input_file_msg = InputFilesRealData()
- input_file_msg.rgb = self.rgb_
- input_file_msg.depth_map = self.depth_
- input_file_msg.joints = self.joints_
- input_file_msgs = InputFilesRealDataMulti()
- input_file_msgs.data_pieces = [input_file_msg]
- self.publisher_.publish(input_file_msgs)
- print("Published")
- self.is_ready_ = True
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
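
In the deleted node above, `depth_map` travels as a flattened float64 list, so a consumer needs the original image shape to rebuild it. A round-trip sketch with an illustrative 720x1280 shape:

```
# Flatten a depth image for the depth_map field and reconstruct it (sketch).
import numpy as np

depth = np.random.rand(720, 1280)                        # placeholder depth image
depth_list = depth.astype('float64').flatten().tolist()  # as published above

restored = np.array(depth_list, dtype=np.float64).reshape(720, 1280)
assert np.allclose(depth, restored)
```
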
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_multi_ur5_check.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_multi_ur5_check.py
deleted file mode 100644
index d84bc40..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_multi_ur5_check.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Bool
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRealData, InputFilesRealDataMulti
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.cv_bridge_ = CvBridge()
- self.franka_info_folder_ = '/home/lawrence/cross_embodiment_ws/src/gazebo_env/ur5_check'
- self.check_npy_file_ = "/home/lawrence/cross_embodiment_ws/src/gazebo_env/ur5_check/check_ur5.npy"
- self.check_npy_ = np.load(self.check_npy_file_,allow_pickle=True).tolist()
- print("Left Intrinsic matrix: " + str(self.check_npy_['cam_intrinsic_matrix_left']))
- print("Left Extrinsic matrix: " + str(self.check_npy_['cam_extrinsic_matrix_left']))
- self.left_rgb_np_ = self.check_npy_['img_0']['img_left']
- self.left_depth_np_ = self.check_npy_['img_0']['depth']
-
- cv2.imwrite('fake_depth.png',self.normalize_depth_image(self.left_depth_np_))
- self.joints_np_ = np.array(self.check_npy_['img_0']['joint_angles'])
- self.left_rgb_ = self.cv_bridge_.cv2_to_imgmsg(self.left_rgb_np_,'bgr8')
- self.left_depth_ = self.left_depth_np_.astype('float64').flatten().tolist()
- self.joints_ = self.joints_np_.astype('float64').tolist()
- self.publisher_ = self.create_publisher(InputFilesRealDataMulti,'/input_files_data_real_multi',1)
- left_input_file_msg = InputFilesRealData()
- left_input_file_msg.rgb = self.left_rgb_
- left_input_file_msg.depth_map = self.left_depth_
- left_input_file_msg.joints = self.joints_
- left_input_file_msg.camera_name = "left"
-
- print("Right Intrinsic matrix: " + str(self.check_npy_['cam_intrinsic_matrix_right']))
- print("Right Extrinsic matrix: " + str(self.check_npy_['cam_extrinsic_matrix_right']))
- self.right_rgb_np_ = self.check_npy_['img_0']['img_right']
- self.right_depth_np_ = self.check_npy_['img_0']['depth']
-
- self.joints_np_ = np.array(self.check_npy_['img_0']['joint_angles'])
- self.right_rgb_ = self.cv_bridge_.cv2_to_imgmsg(self.right_rgb_np_,'bgr8')
- self.right_depth_ = self.right_depth_np_.astype('float64').flatten().tolist()
- self.joints_ = self.joints_np_.astype('float64').tolist()
- self.publisher_ = self.create_publisher(InputFilesRealDataMulti,'/input_files_data_real_multi',1)
- right_input_file_msg = InputFilesRealData()
- right_input_file_msg.rgb = self.right_rgb_
- right_input_file_msg.depth_map = self.right_depth_
- right_input_file_msg.joints = self.joints_
- right_input_file_msg.camera_name = "right"
-
- self.input_file_msg = InputFilesRealDataMulti()
- self.input_file_msg.data_pieces = [left_input_file_msg, right_input_file_msg]
- time.sleep(1)
- self.publisher_.publish(self.input_file_msg)
- print("Published")
- self.is_ready_ = True
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_ur5_check.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_ur5_check.py
deleted file mode 100644
index 6223ea3..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_real_ur5_check.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Bool
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRealData
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.cv_bridge_ = CvBridge()
- self.franka_info_folder_ = '/home/lawrence/cross_embodiment_ws/src/gazebo_env/ur5_check'
- self.check_npy_file_ = "/home/lawrence/cross_embodiment_ws/src/gazebo_env/ur5_check/check_ur5.npy"
- self.check_npy_ = np.load(self.check_npy_file_,allow_pickle=True).tolist()
- print("Intrinsic matrix: " + str(self.check_npy_['cam_intrinsic_matrix_left']))
- print("Extrinsic matrix: " + str(self.check_npy_['cam_extrinsic_matrix_left']))
- self.rgb_np_ = self.check_npy_['img_0']['img_left']
- self.depth_np_ = self.check_npy_['img_0']['depth']
-
- cv2.imwrite('fake_depth.png',self.normalize_depth_image(self.depth_np_))
- self.joints_np_ = np.array(self.check_npy_['img_0']['joint_angles'])
- self.rgb_ = self.cv_bridge_.cv2_to_imgmsg(self.rgb_np_,'bgr8')
- self.depth_ = self.depth_np_.astype('float64').flatten().tolist()
- self.joints_ = self.joints_np_.astype('float64').tolist()
- self.publisher_ = self.create_publisher(InputFilesRealData,'/input_files_data_real',1)
- input_file_msg = InputFilesRealData()
- input_file_msg.rgb = self.rgb_
- input_file_msg.depth_map = self.depth_
- input_file_msg.joints = self.joints_
- time.sleep(1)
- self.publisher_.publish(input_file_msg)
- print("Published")
- self.is_ready_ = True
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite.py
deleted file mode 100644
index 2dd7953..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.franka_info_folder_ = '/home/benchturtle/cross_embodiment_ws/src/gazebo_env/robosuite_offline_4/offline_ur5e/offline_ur5e_'
- timer_period = 3
- self.timer_ = self.create_timer(timer_period, self.timerCallback)
- self.rgb_ = "rgb.png"
- self.depth_ = "depth.npy"
- self.segmentation_ = "seg.png"
- self.joints_ = "joints.npy"
- self.publisher_ = self.create_publisher(InputFilesRobosuite,'/input_files',1)
- self.i_ = 0
- self.is_ready_ = True
-
- def timerCallback(self):
- if self.is_ready_ and self.i_ < 100:
- franka_folder = self.franka_info_folder_+str(int(self.i_ / 2)) +'/'
- rgb = franka_folder + self.rgb_
- depth = franka_folder + self.depth_
- segmentation = franka_folder + self.segmentation_
- joints = franka_folder + self.joints_
- input_file_msg = InputFilesRobosuite()
- input_file_msg.rgb = rgb
- input_file_msg.depth = depth
- input_file_msg.segmentation = segmentation
- input_file_msg.joints = joints
- self.publisher_.publish(input_file_msg)
- self.i_ += 1
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_better.py
deleted file mode 100644
index 5e821d4..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_better.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Bool
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.franka_info_folder_ = '/home/benchturtle/cross_embodiment_ws/src/gazebo_env/robosuite_offline_4/offline_ur5e/offline_ur5e_'
- self.rgb_ = "rgb.png"
- self.depth_ = "depth.npy"
- self.segmentation_ = "seg.png"
- self.joints_ = "joints.npy"
- self.publisher_ = self.create_publisher(InputFilesRobosuite,'/input_files',1)
- self.i_ = 0
- self.sendInput()
- self.next_input_subscriber = self.create_subscription(Bool,'/ready_for_next_input',self.sendInput,1)
- self.is_ready_ = True
-
- def sendInput(self,msg=None):
- print("Num: " + str(self.i_))
- franka_folder = self.franka_info_folder_+str(self.i_) +'/'
- rgb = franka_folder + self.rgb_
- depth = franka_folder + self.depth_
- segmentation = franka_folder + self.segmentation_
- joints = franka_folder + self.joints_
- input_file_msg = InputFilesRobosuite()
- input_file_msg.rgb = rgb
- input_file_msg.depth = depth
- input_file_msg.segmentation = segmentation
- input_file_msg.joints = joints
- self.publisher_.publish(input_file_msg)
- self.i_ += 1
-
- def timerCallback(self):
- if self.is_ready_ and self.i_ < 100:
- franka_folder = self.franka_info_folder_+str(int(self.i_ / 2)) +'/'
- rgb = franka_folder + self.rgb_
- depth = franka_folder + self.depth_
- segmentation = franka_folder + self.segmentation_
- joints = franka_folder + self.joints_
- input_file_msg = InputFilesRobosuite()
- input_file_msg.rgb = rgb
- input_file_msg.depth = depth
- input_file_msg.segmentation = segmentation
- input_file_msg.joints = joints
- self.publisher_.publish(input_file_msg)
- self.i_ += 1
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
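
Unlike the fixed-period reader it replaces, this deleted node paced itself with a handshake: publish one input, then wait for a `Bool` on `/ready_for_next_input` before sending the next. A sketch of the consumer side of that protocol (topic and message types from the node above; the processing step is a placeholder):

```
#!/usr/bin/env python3
# Acknowledge each input so the reader node above sends the next one (sketch).
import rclpy
from rclpy.node import Node
from std_msgs.msg import Bool
from input_filenames_msg.msg import InputFilesRobosuite

class InputConsumer(Node):
    def __init__(self):
        super().__init__('input_consumer')
        self.ack_publisher_ = self.create_publisher(Bool, '/ready_for_next_input', 1)
        self.subscription_ = self.create_subscription(
            InputFilesRobosuite, '/input_files', self.callback, 1)

    def callback(self, msg):
        self.get_logger().info('Processing ' + msg.rgb)  # placeholder for real work
        ack = Bool()
        ack.data = True
        self.ack_publisher_.publish(ack)                 # request the next input

def main(args=None):
    rclpy.init(args=args)
    node = InputConsumer()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()

if __name__ == '__main__':
    main()
```
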
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_diffusion_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_diffusion_better.py
deleted file mode 100644
index c2aaafa..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_diffusion_better.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Bool, Float64MultiArray, Int16
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesDiffusionData
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.franka_info_folder_ = '/home/benchturtle/diffusion_data_noisy'
- self.rgb_folder_ = self.franka_info_folder_ + '/ur5e_rgb'
- self.rgb_folder_list_ = os.listdir(self.rgb_folder_)
- self.rgb_ = "rgb.png"
- self.depth_ = "depth.npy"
- self.segmentation_ = "seg.png"
- self.joints_ = "joints.npy"
- self.cv_bridge_ = CvBridge()
- self.publisher_ = self.create_publisher(InputFilesDiffusionData,'/input_files_data',1)
- self.i_ = 0
- self.j_ = 0
- self.i_limit_ = len([item for item in self.rgb_folder_list_ if os.path.isdir(os.path.join(self.rgb_folder_, item))])
- self.j_limits_ = []
- for i in range(self.i_limit_):
- str_i = str(i)
- rgb_traj_folder = self.rgb_folder_ + '/' + str_i
- image_list = os.listdir(rgb_traj_folder)
- j_limit = len(image_list)
- self.j_limits_.append(j_limit)
- self.first_time_ = True
- self.is_ready_ = True
- self.sendInput()
-
- def sendInput(self,msg=None):
- for i in range(self.i_limit_):
- for j in range(self.j_limits_[i]):
- print("Publishing Demo " + str(i) + " " + str(j) + '/' + str(self.j_limits_[i]))
- franka_folder = self.franka_info_folder_
- rgb_file = franka_folder + '/ur5e_rgb/' + str(i) + '/' + str(j) + '.jpg'
- depth_file = franka_folder + '/ur5e_depth/' + str(i) + '/' + str(j) + '.npy'
- seg_file = franka_folder + '/ur5e_mask/' + str(i) + '/' + str(j) + '.jpg'
- joint_file = franka_folder + '/ur5e_joint_gripper_pose/' + str(i) + '/' + str(j) + '.txt'
- joint_read_file = open(joint_file,"r")
- joint_file_contents = joint_read_file.read()
- joint_list = [float(x) for x in joint_file_contents[1:-1].split()]
- mod_joint_list = joint_list[:6] + [joint_list[-1]]
- rgb = cv2.imread(rgb_file)
- depth = np.load(depth_file)
- seg = cv2.imread(seg_file)
- rgb_array = rgb.flatten().tolist()
- depth_array = depth.flatten().tolist()
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(seg)
- input_file_robosuite_data_msg = InputFilesDiffusionData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = mod_joint_list
- demo_num_msg = Int16()
- traj_num_msg = Int16()
- demo_num_msg.data = i
- traj_num_msg.data = j
- input_file_robosuite_data_msg.demo_num = demo_num_msg
- input_file_robosuite_data_msg.traj_num = traj_num_msg
- self.publisher_.publish(input_file_robosuite_data_msg)
- time.sleep(0.5)
-
- def timerCallback(self):
- if self.is_ready_ and self.i_ < 100:
- franka_folder = self.franka_info_folder_+str(int(self.i_ / 2)) +'/'
- rgb = franka_folder + self.rgb_
- depth = franka_folder + self.depth_
- segmentation = franka_folder + self.segmentation_
- joints = franka_folder + self.joints_
- input_file_msg = InputFilesRobosuite()
- input_file_msg.rgb = rgb
- input_file_msg.depth = depth
- input_file_msg.segmentation = segmentation
- input_file_msg.joints = joints
- self.publisher_.publish(input_file_msg)
- self.i_ += 1
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
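
The joint files consumed above store a bracketed, space-separated vector; the node keeps the six UR5e arm joints plus the trailing gripper value. A parsing sketch with an illustrative line:

```
# Parse "[j1 j2 ... gripper]" text into six arm joints plus gripper (sketch).
line = "[-0.1 0.2 -0.3 0.4 -0.5 0.6 0.02]"        # illustrative file contents
values = [float(x) for x in line[1:-1].split()]
arm_and_gripper = values[:6] + [values[-1]]       # UR5e joints + gripper value
print(arm_and_gripper)
```
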
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_franka_to_franka.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_franka_to_franka.py
deleted file mode 100644
index 8cec761..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_franka_to_franka.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesFrankaToFranka
-import h5py
-from std_msgs.msg import Int8
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.hdf5_ = '/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_to_franka/image_v141.hdf5'
- timer_period = 2
- self.timer_ = self.create_timer(timer_period, self.timerCallback)
- self.publisher_ = self.create_publisher(InputFilesFrankaToFranka,'/input_data',1)
- self.cv_bridge_ = CvBridge()
- self.is_ready_ = True
-
- def timerCallback(self):
- if(self.is_ready_):
- with h5py.File(self.hdf5_, 'r+') as file:
- for i in range(len(file['data'].keys())):
- demo_str = 'demo_'+str(i)
- obs = file['data'][demo_str]['obs']
- for j in range(2 * obs['agentview_image'].shape[0]):
- rgb = obs['agentview_image'][int(j / 2)]
- joints = obs['robot0_joint_pos'][int(j / 2)]
- gripper = abs(obs['robot0_gripper_qpos'][int(j / 2)][0])
- joints_and_gripper = np.append(joints,gripper)
- input_msg = InputFilesFrankaToFranka()
- input_msg.rgb = self.cv_bridge_.cv2_to_imgmsg(rgb)
- input_msg.joints = joints_and_gripper.tolist()
- demo_num_msg = Int8()
- traj_num_msg = Int8()
- demo_num_msg.data = i
- traj_num_msg.data = int(j / 2)
- input_msg.demo_num = demo_num_msg
- input_msg.traj_num = traj_num_msg
-
- self.publisher_.publish(input_msg)
- print("Publishing Demo " + str(i) + " " + str(int(j / 2)) + '/' + str(obs['agentview_image'].shape[0]))
- time.sleep(2)
- print("Done")
- exit()
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_franka_to_franka_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_franka_to_franka_better.py
deleted file mode 100644
index 3ba4b50..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/read_data_node_robosuite_franka_to_franka_better.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesFrankaToFranka
-import h5py
-from std_msgs.msg import Int16
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Bool
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite
-
-class ReadData(Node):
- def __init__(self):
- super().__init__('read_data_node')
- self.is_ready_ = False
- self.hdf5_ = '/home/benchturtle/cross_embodiment_ws/src/gazebo_env/franka_to_franka/image_v141.hdf5'
- self.rgb_ = "rgb.png"
- self.depth_ = "depth.npy"
- self.segmentation_ = "seg.png"
- self.joints_ = "joints.npy"
- self.cv_bridge_ = CvBridge()
- self.publisher_ = self.create_publisher(InputFilesFrankaToFranka,'/input_data',1)
- self.i_ = 0
- self.j_ = 0
-
- #self.next_input_subscriber = self.create_subscription(Bool,'/ready_for_next_input',self.sendInput,1)
- self.file_ = h5py.File(self.hdf5_,'r+')
- self.i_limit_ = len(self.file_['data'].keys())
- self.demo_list_ = [f"demo_{i}" for i in range(0, self.i_limit_)]
- self.obses_ = []
- self.j_limits_ = []
- for demo_str in self.demo_list_:
- single_obs = self.file_['data'][demo_str]['obs']
- j_limit = single_obs['agentview_image'].shape[0]
- self.obses_.append(self.file_['data'][demo_str]['obs'])
- self.j_limits_.append(j_limit)
- self.sendInput()
- self.is_ready_ = True
-
- def sendInput(self,msg=None):
- for i in range(self.i_limit_):
- for j in range(self.j_limits_[i]):
- print("Publishing Demo " + str(i) + " " + str(j) + '/' + str(self.j_limits_[i]))
- rgb = self.obses_[i]['agentview_image'][j]
- joints = self.obses_[i]['robot0_joint_pos'][j]
- gripper = abs(self.obses_[i]['robot0_gripper_qpos'][j][0])
- joints_and_gripper = np.append(joints,gripper)
- input_msg = InputFilesFrankaToFranka()
- input_msg.rgb = self.cv_bridge_.cv2_to_imgmsg(rgb)
- input_msg.joints = joints_and_gripper.tolist()
- demo_num_msg = Int16()
- traj_num_msg = Int16()
- demo_num_msg.data = i
- traj_num_msg.data = j
- input_msg.demo_num = demo_num_msg
- input_msg.traj_num = traj_num_msg
- self.publisher_.publish(input_msg)
- time.sleep(1)
-
-
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- read_data = ReadData()
-
- rclpy.spin(read_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- read_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
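
The `_better` variant above differs mainly in resolving every demo handle and trajectory length once at startup instead of re-walking the HDF5 hierarchy on each timer tick, and in publishing `Int16` indices (an `Int8` would overflow past demo 127). A sketch of that caching pattern, under the same dataset-layout assumption as before:

```
import h5py

class DemoCache:
    """Preload demo handles and lengths once, then stream frames cheaply."""
    def __init__(self, hdf5_path):
        self.file = h5py.File(hdf5_path, "r")
        demo_names = [f"demo_{i}" for i in range(len(self.file["data"].keys()))]
        self.obs = [self.file["data"][name]["obs"] for name in demo_names]
        self.lengths = [o["agentview_image"].shape[0] for o in self.obs]

    def frames(self):
        for i, obs in enumerate(self.obs):
            for j in range(self.lengths[i]):
                yield i, j, obs["agentview_image"][j]
```
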
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/real_offline_check_node.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/real_offline_check_node.py
deleted file mode 100644
index c7366d4..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/real_offline_check_node.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import Image
-from message_filters import Subscriber, TimeSynchronizer
-import cv2
-from cv_bridge import CvBridge
-import numpy as np
-from scipy.ndimage import generic_filter
-
-class ImageSubscriber(Node):
- def __init__(self):
- super().__init__('image_subscriber')
- self.cv_bridge_ = CvBridge()
- depth_image_sub = Subscriber(self, Image, '/ur5_camera/image_raw')
- depth_depth_image_sub = Subscriber(self, Image, '/ur5_camera/depth/image_raw')
-
- # Synchronize the two image topics based on their timestamps
- ts = TimeSynchronizer([depth_image_sub, depth_depth_image_sub], 10)
- ts.registerCallback(self.callback)
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def replace_inf_with_neighbors_average(self,array):
- def replace_function(subarray):
- center = subarray[4]
- if(center != np.inf and center != -np.inf and center != np.nan):
- return center
- valid_values = subarray[subarray != -np.inf]
- if valid_values.size == 0:
- return np.nan
- else:
- return np.nanmean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
-
-
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- # TODO(kush): Clean this up to use actual data, not file names
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = np.array(rgb,dtype=np.uint8)
- depth_np = np.array(depth,dtype=np.float64)
- seg = seg_file
-
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- robosuite_depth_image_unmasked = depth_np
- robosuite_rgb_image_unmasked = rgb_np
- robosuite_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 50
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- cv2.imwrite('gazebo_inpaint.png',image_8bit)
-
- def dummyInpainting(self,rgb,gazebo_rgb,gazebo_seg):
- import pdb
- pdb.set_trace()
- _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
- gazebo_segmentation_mask_255 = gazebo_seg
- inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
- cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
- outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
- gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- cv2.imwrite('gazebo_robot_only.png',gazebo_only)
- background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
- cv2.imwrite('background_only.png',background_only)
- background_only_filled = cv2.inpaint(background_only,outline_mask,3,cv2.INPAINT_TELEA)
- cv2.imwrite('background_only_filled.png',background_only_filled)
- dummy_inpaint = gazebo_only + background_only_filled
- cv2.imwrite('dummy_inpaint.png',dummy_inpaint)
- ultimate_robot = cv2.imread('/home/benchturtle/cross_embodiment_ws/ultimate_robot.png')
- ultimate_background = cv2.imread('/home/benchturtle/cross_embodiment_ws/ultimate_background.png')
- cv2.imwrite('smart_inpaint.png',ultimate_robot+ultimate_background)
-
- def callback(self, rgb_image_msg, depth_image_msg):
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb_image_msg)
-
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(depth_image_msg)
- cv2.imwrite('inf_mask.png',(np.isinf(gazebo_depth_np)*255).astype(np.uint8))
- normalized_depth_image = self.normalize_depth_image(gazebo_depth_np)
- cv2.imwrite('gazebo_norm_depth.png',normalized_depth_image)
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- robosuite_rgb = cv2.imread('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/real_check/ur5e_check_0.png')
-
- robosuite_depth = np.load('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/real_check/ur5e_depth_0.npy')
- cv2.imwrite('real_rgb.png',robosuite_rgb)
- cv2.imwrite('nan_depth_mask.png',(np.isnan(robosuite_depth)*255).astype(np.uint8))
- cv2.imwrite('inf_depth_mask.png',(np.isinf(robosuite_depth)*255).astype(np.uint8))
- inf_mask = (np.isinf(robosuite_depth)*255).astype(np.uint8)
- robosuite_depth_inf_filled = cv2.inpaint(robosuite_depth,inf_mask,1,cv2.INPAINT_TELEA)
- cv2.imwrite('inf_depth_mask2.png',(np.isinf(robosuite_depth_inf_filled)*255).astype(np.uint8))
- nan_mask = (np.isnan(robosuite_depth_inf_filled)*255).astype(np.uint8)
- robosuite_depth_inf_filled_nan_filled = cv2.inpaint(robosuite_depth_inf_filled,nan_mask,1,cv2.INPAINT_TELEA)
- cv2.imwrite('nan_depth_mask2.png',(np.isnan(robosuite_depth_inf_filled_nan_filled)*255).astype(np.uint8))
- robosuite_depth_no_nan = np.nan_to_num(robosuite_depth_inf_filled_nan_filled, nan=0)
- normalized_robosuite_depth_image = self.normalize_depth_image(robosuite_depth_no_nan)
- cv2.imwrite('real_depth.png',normalized_robosuite_depth_image)
- robosuite_depth = gazebo_depth_np
- robosuite_seg = gazebo_seg_255_np
- self.dummyInpainting(robosuite_rgb,gazebo_rgb_np,gazebo_seg_255_np)
- # Depth image callback logic
- self.get_logger().info('Received synchronized depth camera images')
-
-def main(args=None):
- rclpy.init(args=args)
-
- image_subscriber = ImageSubscriber()
-
- rclpy.spin(image_subscriber)
-
- image_subscriber.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
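
The core of the deleted check node is its `inpainting()` helper: mask the real robot out of the RGB image, TELEA-inpaint the hole, then composite the simulated robot wherever it is the nearest surface, using sentinel depths (1000 for missing simulated readings, 5 for the masked-out real region) so a per-pixel argmin picks the correct layer. A compressed sketch of that logic; the array shapes and metre-scale sentinels are assumptions carried over from the code above:

```
import cv2
import numpy as np

def cross_paint(real_rgb, real_depth, real_mask, sim_rgb, sim_depth):
    """real_mask: uint8 {0,255} robot mask; depths in metres, 0 = no reading."""
    # Remove the real robot and fill the hole from surrounding pixels.
    background = cv2.bitwise_and(real_rgb, real_rgb, mask=cv2.bitwise_not(real_mask))
    background = cv2.inpaint(background, real_mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
    # Sentinels: missing sim depth reads as far, masked/missing real depth as near.
    sim_d = np.where(sim_depth == 0, 1000.0, sim_depth)
    real_d = np.where((real_mask > 0) | (real_depth == 0), 5.0, real_depth)
    take_sim = (sim_d < real_d)[..., None]  # nearest surface wins per pixel
    return np.where(take_sim, sim_rgb, background).astype(np.uint8)
```
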
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/reproject_test_node.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/reproject_test_node.py
index 7b3ed95..27591ae 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/reproject_test_node.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/reproject_test_node.py
@@ -60,9 +60,9 @@ def listenerCallbackOnlineDebug(self,msg):
data_dict = dict(zip(map(tuple,keys),map(tuple,values)))
points = np.array([x.reshape(-1),y.reshape(-1),z.reshape(-1)]).T
- new_frame_to_old_frame = np.array([[0.98,0,0.199,0],
- [0,1,0,0],
- [-0.199,0,0.980,0],
+ new_frame_to_old_frame = np.array([[0.995,-0.1,0.02,0],
+ [0.1,0.995,-0.004,0],
+ [-0.02,0.006,1,0],
[0,0,0,1]])
homogenous_points = np.column_stack((points, np.ones(len(points))))
# Create an Open3D point cloud
@@ -76,6 +76,7 @@ def listenerCallbackOnlineDebug(self,msg):
point_dict = dict(zip(map(tuple,transformed_points),map(tuple,homogenous_points)))
new_image_mask = np.zeros((msg.rgb.height,msg.rgb.width)).astype(np.uint8)
new_image = np.zeros((msg.rgb.height,msg.rgb.width,3)).astype(np.uint8)
+ new_depth = np.zeros((msg.rgb.height,msg.rgb.width)).astype(np.float64)
new_fx = fx
new_fy = fy
new_cx = cx
@@ -108,6 +109,7 @@ def listenerCallbackOnlineDebug(self,msg):
cv2.imwrite('image_overlap.png',new_image_mask)
cv2.imwrite('image_overlap_color.png',new_image)
cv2.imwrite('image_overlap_inpaint.png',inpainted_image)
+ return inpainted_image
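
This patch retunes the hand-measured `new_frame_to_old_frame` extrinsic (the new rotation block is only approximately orthonormal; its off-diagonal pair -0.004 and 0.006 does not transpose exactly) and allocates a `new_depth` buffer for the reprojected depth. For reference, the reprojection this node performs reduces to transforming camera-frame points with the 4x4 matrix and projecting through pinhole intrinsics; a sketch with `fx/fy/cx/cy` as placeholders:

```
import numpy as np

def reproject(points, T, fx, fy, cx, cy):
    """points: (N, 3) camera-frame points; T: 4x4 new-frame-from-old transform."""
    homogenous = np.column_stack((points, np.ones(len(points))))
    moved = (T @ homogenous.T).T[:, :3]
    # Pinhole projection; assumes all transformed points have positive depth.
    u = fx * moved[:, 0] / moved[:, 2] + cx
    v = fy * moved[:, 1] / moved[:, 2] + cy
    return np.stack((u, v), axis=1)
```
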
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/test_urdf_parser.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/test_urdf_parser.py
deleted file mode 100644
index 00943ac..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/test_urdf_parser.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import subprocess
-import xml.etree.ElementTree as ET
-xacro_command = "ros2 run xacro xacro /home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_gazebo.urdf.xacro"
-xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
-)
-urdf_string = ""
-while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
-root = ET.fromstring(urdf_string)
-for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- print(origin_element.attrib)
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- print(link_name)
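
The deleted `test_urdf_parser.py` duplicates a pattern used by several nodes in this tree: expand a xacro through a subprocess, then walk the URDF for links that carry a visual mesh. The Popen/readline loop collapses to a single `subprocess.run`; a sketch (hypothetical path handling, assumes `ros2` is on `PATH`):

```
import subprocess
import xml.etree.ElementTree as ET

def load_visual_meshes(xacro_path):
    """Map link name -> visual mesh filename for links that define one."""
    result = subprocess.run(
        ["ros2", "run", "xacro", "xacro", xacro_path],
        check=True, capture_output=True, text=True,
    )
    root = ET.fromstring(result.stdout)
    meshes = {}
    for link in root.iter("link"):
        mesh = link.find("./visual/geometry/mesh")
        if mesh is not None:
            # Drop a leading "file://" scheme if present; the scripts above
            # sliced off the first 7 characters for the same reason.
            meshes[link.attrib["name"]] = mesh.attrib["filename"].removeprefix("file://")
    return meshes
```
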
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/ur5e_joint_command_publisher.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/ur5e_joint_command_publisher.py
deleted file mode 100644
index 2b1802d..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/ur5e_joint_command_publisher.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-import rclpy
-from rclpy.node import Node
-from std_msgs.msg import Float64MultiArray
-import numpy as np
-
-class JointCommandsPublisher(Node):
- def __init__(self):
- super().__init__('joint_commands_publisher')
- self.publisher_ = self.create_publisher(Float64MultiArray, 'joint_commands', 10)
- self.timer = self.create_timer(0.1, self.publish_joint_commands)
-
- def publish_joint_commands(self):
- # Creating a Float64MultiArray message
- msg = Float64MultiArray()
-
- # Example joint commands
- joint_commands = [-3.34355599084963, -1.3984358946429651, 1.723738193511963, -1.8961899916278284, -1.5677741209613245, 4.5127763748168945,0.0] # Replace this with your joint commands
- joint_commands[0] += np.pi
-
- # Assigning the joint commands to the Float64MultiArray message
- msg.data = joint_commands
-
- # Publishing the message
- self.publisher_.publish(msg)
- self.get_logger().info('Publishing joint commands: %s' % str(msg.data))
-
-def main(args=None):
- rclpy.init(args=args)
- joint_commands_publisher = JointCommandsPublisher()
- rclpy.spin(joint_commands_publisher)
- joint_commands_publisher.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/ur5e_point_cloud_publisher.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/ur5e_point_cloud_publisher.py
deleted file mode 100644
index 555ae05..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/ur5e_point_cloud_publisher.py
+++ /dev/null
@@ -1,749 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-
-class PointCloudPublisher(Node):
- def __init__(self):
- self.is_ready_ = False
- super().__init__('ur5e_point_cloud_publisher')
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","ur5e_nvidia_with_gripper_solo.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
-
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- self.original_image_ = None
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 10
- )
- self.camera_raw_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.imageRawCallback,
- 10
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",10)
- self.just_robot_image_publisher_ = self.create_publisher(Image,"plain_ur5_image",10)
-
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
-
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",10)
-
- self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",10)
- setup_mesh_timer = self.create_timer(timer_period,self.setupMeshes)
- self.is_ready_ = True
-
- #exit()
- #self.sync_ = TimeSynchronizer(self.subscribers_,10)
- #self.sync_.registerCallback(self.pointcloud_callback)
-
- def imageRawCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
-
- def fullPointcloudCallback(self,msg):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_, dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- for coord in all_pixels:
- x,y = coord
- # TODO Change 5 pixel hardcoded offset. Sorry Professor Chen, I'll do better :)
- mask_image[round(y),round(x)-5] = white_color
- depth_image[round(y),round(x)-5] = depth_data[i]
- i += 1
- np.save('/home/benchturtle/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
-
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/benchturtle/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/benchturtle/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- print("I am the Senate")
-
- def setupMeshesFast(self):
- if(self.is_ready_):
- print(self.original_meshes_)
- full_open3d_mesh = None
- start_time = time.time()
- for [filename,link_name,mesh,rpy_str,xyz_str] in self.links_info_:
- print(self.links_info_[0])
- resulting_mesh = self.setupMesh(filename,link_name,mesh,rpy_str,xyz_str)
- copied_mesh = o3d.geometry.TriangleMesh()
- copied_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(resulting_mesh.vertices))
- copied_mesh.triangles = o3d.utility.Vector3iVector(np.asarray(resulting_mesh.triangles))
- if full_open3d_mesh is None:
- full_open3d_mesh = copied_mesh
- else:
- full_open3d_mesh += copied_mesh
- pcd = full_open3d_mesh.sample_points_uniformly(number_of_points=20000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- o3d.io.write_point_cloud('/home/benchturtle/ur5e.pcd',pcd)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "camera_color_optical_frame"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- end_time = time.time()
-
- def setupMeshes(self):
- if(self.is_ready_):
- print("HERE")
-
- open3d_mesh = None
- start_time = time.time()
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- print("Total time: " + str(time.time() - start_time))
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=20000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- o3d.io.write_point_cloud('/home/benchturtle/ur5e.pcd',pcd)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "camera_color_optical_frame"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- end_time = time.time()
-
- def prelimMeshFast(self,filename):
- # RPY is in ZYX I'm pretty sure
- mesh_scene = trimesh.load(filename)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=500)
- test_pcd_data = np.asarray(test_pcd.points)
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- return open3d_mesh
-
- def setupMeshFast(self,filename,link_name,open3d_mesh,rpy_str,xyz_str):
- if(self.is_ready_):
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- open3d_mesh.rotate(R2,[0,0,0])
- open3d_mesh.translate(xyz_np)
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- print(t_matrix,flush=True)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # RPY is in ZYX I'm pretty sure
- mesh_scene = trimesh.load(filename)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=500)
- test_pcd_data = np.asarray(test_pcd.points)
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- #open3d_mesh = fast_mesh
- start_time = time.time()
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- open3d_mesh.rotate(R2,[0,0,0])
- open3d_mesh.translate(xyz_np)
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
- #print("Error finding transform between camera frame and " + str(link_name))
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def pointcloud_callback(self,msg1,msg2,msg3,msg4,msg5,msg6,msg7):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- else:
- msg1_pixels = self.getPixels(msg1)
- msg2_pixels = self.getPixels(msg2)
- msg3_pixels = self.getPixels(msg3)
- msg4_pixels = self.getPixels(msg4)
- msg5_pixels = self.getPixels(msg5)
- msg6_pixels = self.getPixels(msg6)
- msg7_pixels = self.getPixels(msg7)
- all_pixels = np.vstack((msg1_pixels,msg2_pixels,msg3_pixels,msg4_pixels,msg5_pixels,msg6_pixels,msg7_pixels))
- mask_image = np.zeros(self.image_shape_, dtype=np.uint8)
- white_color = (255,255,255)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(y),round(x)] = white_color
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(mask_image,encoding="bgr8")
- self.mask_image_publisher_.publish(ros_mask_image)
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- msg_data_homogenous = np.hstack((msg_data,np.ones((msg_data.shape[0],1))))
- pixel_data_homogenous = np.dot(self.camera_intrinsic_matrix_,msg_data_homogenous.T).T
- pixel_data = pixel_data_homogenous[:,:2] / pixel_data_homogenous[:,2:]
- return pixel_data, depth_data
-
- def createTransform(self,rpy_str,xyz_str):
- if(self.is_ready_):
- rpy_array = rpy_str.split(' ')
- xyz_array = xyz_str.split(' ')
- roll = float(rpy_array[0])
- pitch = float(rpy_array[1])
- yaw = float(rpy_array[2])
- x = float(xyz_array[0])
- y = float(xyz_array[1])
- z = float(xyz_array[2])
-
- rotation_x = np.array([[1, 0, 0],
- [0, math.cos(roll), -math.sin(roll)],
- [0, math.sin(roll), math.cos(roll)]])
-
- rotation_y = np.array([[math.cos(pitch), 0, math.sin(pitch)],
- [0, 1, 0],
- [-math.sin(pitch), 0, math.cos(pitch)]])
-
- rotation_z = np.array([[math.cos(yaw), -math.sin(yaw), 0],
- [math.sin(yaw), math.cos(yaw), 0],
- [0, 0, 1]])
-
- rotation_matrix = np.dot(rotation_z, np.dot(rotation_y, rotation_x))
- translation_vector = np.array([[x],[y],[z]])
- transform_matrix = np.concatenate((rotation_matrix,translation_vector),axis=1)
- transform_matrix = np.concatenate((transform_matrix,np.array([[0,0,0,1]])),axis=0)
- return transform_matrix
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def debugTimerCallback(self,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str):
- if(self.is_ready_):
- mesh_scene = trimesh.load(filename)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # if(link_name == "base_link_inertia"):
- # pcd_data = pcd_data[:,[0,2,1]]
- # pcd_data[:,1] *= -1
- # elif(link_name == "shoulder_link"):
- # # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # # open3d_mesh.rotate(R,[0,0,0])
- # # # print("RPY")
- # # # print(rpy_str)
- # # # print("XYZ Str")
- # # # print(xyz_str)
- # # rpy_str_list = rpy_str.split()
- # # rpy_floats = [float(x) for x in rpy_str_list]
- # # rpy_np = np.array(rpy_floats)
- # # # print("RPY NUMPY")
- # # # print(rpy_np)
- # # R2 = pcd.get_rotation_matrix_from_xyz(rpy_np)
- # # # print("We have R2 with us")
- # # # print(R2)
- # # open3d_mesh.rotate(R2,[0,0,0])
- # # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # # pcd_data = np.asarray(pcd.points)
- # # #pcd_data = pcd_data[:,[0,2,1]]
- # # #pcd_data[:,0] *= -1
-
- # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # open3d_mesh.rotate(R,[0,0,0])
- # # print("RPY")
- # # print(rpy_str)
- # # print("XYZ Str")
- # # print(xyz_str)
- # rpy_str_list = rpy_str.split()
- # rpy_floats = [float(x) for x in rpy_str_list]
- # rpy_np = np.array(rpy_floats)
- # xyz_str_list = xyz_str.split()
- # xyz_floats = [float(x) for x in xyz_str_list]
- # xyz_np = 1000 * np.array(xyz_floats)
- # print("XYZ NUMPY")
- # print(xyz_np)
- # R21 = pcd.get_rotation_matrix_from_xyz(rpy_np)
- # print("We have R21 with us")
- # print(R21)
- # R22 = pcd.get_rotation_matrix_from_xzy(rpy_np)
- # print("We have R22 with us")
- # print(R22)
- # R23 = pcd.get_rotation_matrix_from_yxz(rpy_np)
- # print("We have R23 with us")
- # print(R23)
- # R24 = pcd.get_rotation_matrix_from_yzx(rpy_np)
- # print("We have R24 with us")
- # print(R24)
- # R25 = pcd.get_rotation_matrix_from_zxy(rpy_np)
- # print("We have R25 with us")
- # print(R25)
- # R26 = pcd.get_rotation_matrix_from_zyx(rpy_np)
- # print("We have R26 with us")
- # print(R26)
- # R2 = self.eulerToR(rpy_np)
- # print("We have R2 with us")
- # print(R2)
- # open3d_mesh.rotate(R2,[0,0,0])
- # open3d_mesh.translate(xyz_np)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # elif(link_name == "upper_arm_link"):
- # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # open3d_mesh.rotate(R,[0,0,0])
- # # print("RPY")
- # # print(rpy_str)
- # # print("XYZ Str")
- # # print(xyz_str)
- # rpy_str_list = rpy_str.split()
- # rpy_floats = [float(x) for x in rpy_str_list]
- # rpy_np = np.array(rpy_floats)
- # xyz_str_list = xyz_str.split()
- # xyz_floats = [float(x) for x in xyz_str_list]
- # xyz_np = 1000 * np.array(xyz_floats)
- # print("XYZ NUMPY")
- # print(xyz_np)
- # R21 = pcd.get_rotation_matrix_from_xyz(rpy_np)
- # print("We have R21 with us")
- # print(R21)
- # R22 = pcd.get_rotation_matrix_from_xzy(rpy_np)
- # print("We have R22 with us")
- # print(R22)
- # R23 = pcd.get_rotation_matrix_from_yxz(rpy_np)
- # print("We have R23 with us")
- # print(R23)
- # R24 = pcd.get_rotation_matrix_from_yzx(rpy_np)
- # print("We have R24 with us")
- # print(R24)
- # R25 = pcd.get_rotation_matrix_from_zxy(rpy_np)
- # print("We have R25 with us")
- # print(R25)
- # R26 = pcd.get_rotation_matrix_from_zyx(rpy_np)
- # print("We have R26 with us")
- # print(R26)
- # R2 = self.eulerToR(rpy_np)
- # print("We have R2 with us")
- # print(R2)
- # open3d_mesh.rotate(R2,[0,0,0])
- # open3d_mesh.translate(xyz_np)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # elif(link_name == "forearm_link"):
- # R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- # open3d_mesh.rotate(R,[0,0,0])
- # rpy_str_list = rpy_str.split()
- # rpy_floats = [float(x) for x in rpy_str_list]
- # rpy_np = np.array(rpy_floats)
- # xyz_str_list = xyz_str.split()
- # xyz_floats = [float(x) for x in xyz_str_list]
- # xyz_np = 1000 * np.array(xyz_floats)
- # print("XYZ NUMPY")
- # print(xyz_np)
- # R2 = self.eulerToR(rpy_np)
- # print("We have R2 with us")
- # print(R2)
- # open3d_mesh.rotate(R2,[0,0,0])
- # open3d_mesh.translate(xyz_np)
- # pcd = open3d_mesh.sample_points_uniformly(number_of_points=100000)
- # pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- # pcd_data = np.asarray(pcd.points)
- # #pcd_data = pcd_data[:,[1,0,2]]
- # #pcd_data[:,0] *= -1
-
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- open3d_mesh.rotate(R2,[0,0,0])
- open3d_mesh.translate(xyz_np)
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time()
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- except TransformException as ex:
- print("Tough")
- return
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=100)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
-
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "camera_color_optical_frame"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- publisher.publish(point_cloud_msg)
-
- def timerCallback(self,filename,link_name,publisher,rpy_str,xyz_str):
- if(self.is_ready_):
- mesh = trimesh.load(filename)
- transform_matrix = self.createTransform(rpy_str,xyz_str)
- o3d_mesh = None
- pcds = []
- points = None
- R = np.array([[1,0,0],[0,0,-1],[0,1,0]])
- R2 = np.array([[-1,0,0],[0,-1,0],[0,0,1]])
- for item in mesh.geometry:
- o3d_mesh = mesh.geometry[item].as_open3d
- #o3d_mesh = o3d_mesh.rotate(R)
- pcd = o3d_mesh.sample_points_uniformly(number_of_points=100000)
- if points is None:
- points = np.asarray(pcd.points)
- else:
- points = np.concatenate((points,pcd.points),axis=0)
- pcds.append(pcd)
- new_pcd = o3d.geometry.PointCloud()
- new_pcd.points = o3d.utility.Vector3dVector(points)
- center_translation = -new_pcd.get_center()
- new_pcd = new_pcd.translate(center_translation)
- new_pcd = new_pcd.rotate(R)
- new_pcd = new_pcd.rotate(R2)
-
- new_pcd = new_pcd.transform(transform_matrix)
- #print(transform_matrix)
- #mesh = mesh.apply_transform(transform_matrix)
- # vertices = None
- # for item in mesh.geometry:
- # if vertices is None:
- # vertices = mesh.geometry[item].vertices
- # else:
- # vertices = np.concatenate((vertices,mesh.geometry[item].vertices),axis=0)
-
- # # Create a visualization window
- # pcd = o3d.geometry.PointCloud()
- # pcd.points = o3d.utility.Vector3dVector(vertices)
- # R = np.array([[1,0,0],[0,0,-1],[0,1,0]])
- # R2 = np.array([[-1,0,0],[0,-1,0],[0,0,1]])
- # pcd = pcd.rotate(R)
- # pcd = pcd.rotate(R2)
- #pcd = pcd.transform(transform_matrix)
- # Pointcloud still rotated incorrectly
- #additional_transform = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
- #pcd = pcd.transform(additional_transform)
- pcd_data = np.asarray(new_pcd.points) / 1000
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = link_name
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
-
- publisher.publish(point_cloud_msg)
- # self.dae_paths_ = glob.glob(self.dae_main_path_ + '/*.dae')
- # self.publishers_ = []
- # self.timers_ = []
- # timer_period = 0.5
- # for dae_path in self.dae_paths_:
- # ur5_part = dae_path[dae_path.rfind('/')+1:dae_path.rind('.')]
- # publisher = self.create_publisher(PointCloud2,ur5_part+'_pointcloud',10)
- # self.publishers_.append(publisher)
- # timer = self.create_timer(timer_period,partial(self.timerCallback,param1=dae_path))
-
- # self.ur5_parts_ = ['base','forearm','shoulder','upperarm','wrist1','wrist2','wrist3']
- # self.publishers_ = []
- # self.timers_ = []
- # timer_period = 0.5
- # for ur5_part in self.ur5_parts_:
- # publisher = self.create_publisher(PointCloud2,ur5_part+'_pointcloud',10)
- # self.publishers_.append(publisher)
- # timer = self.create_timer(timer_period,partial(self.timerCallback,param1=ur5_part))
- # self.publisher_ = self.create_publisher(PointCloud2, '/base_pointcloud', 10)
- # timer_period = 0.5 # seconds
- # self.timer = self.create_timer(timer_period, self.timer_callback)
- # self.i = 0
-
- # print(glob.glob(self.dae_main_path_ + '/*.dae'))
- # exit()
- # self.base_filepath_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/meshes/ur5e/visual/base.dae"
-
- # def timer_callback(self):
- # mesh = trimesh.load(self.base_filepath_)
- # vertices = None
- # for item in mesh.geometry:
- # if vertices is None:
- # vertices = mesh.geometry[item].vertices
- # else:
- # vertices = np.concatenate((vertices,mesh.geometry[item].vertices),axis=0)
-
- # # Create a visualization window
- # pcd = o3d.geometry.PointCloud()
- # pcd.points = o3d.utility.Vector3dVector(vertices)
-
- # pcd_data = np.asarray(pcd.points) / 1000
- # np.savetxt('data.csv',pcd_data,delimiter=", ")
- # point_cloud_msg = PointCloud2()
- # point_cloud_msg.header = Header()
- # point_cloud_msg.header.frame_id = 'base_link_inertia'
- # fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- # PointField(name='z', offset=4, datatype=PointField.FLOAT32, count=1),
- # PointField(name='y', offset=8, datatype=PointField.FLOAT32, count=1),
- # ]
- # point_cloud_msg.height = 1
- # point_cloud_msg.width = len(pcd_data)
- # point_cloud_msg.fields = fields
- # point_cloud_msg.is_bigendian = False
- # point_cloud_msg.point_step = 3 * 4
- # point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- # point_cloud_msg.is_dense = True
- # point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
-
- # self.publisher_.publish(point_cloud_msg)
- # self.i += 1
-
-def main(args=None):
- rclpy.init(args=args)
-
- point_cloud_publisher = PointCloudPublisher()
-
- rclpy.spin(point_cloud_publisher)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- point_cloud_publisher.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
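
Two geometry helpers in the deleted point-cloud publisher are worth recording. `eulerToR` composes Rz·Ry·Rx, which is the fixed-axis XYZ roll-pitch-yaw convention URDF uses (the "ZYX" comment describes the same rotation read as intrinsic angles), and `transformStampedToMatrix` expands a quaternion into a rotation matrix term by term, scaling the translation to millimetres to match the mesh units. The hand expansion can be cross-checked against SciPy; a sketch assuming `scipy` is available:

```
import numpy as np
from scipy.spatial.transform import Rotation

def quat_to_matrix(qx, qy, qz, qw, translation, scale=1000.0):
    """4x4 homogeneous transform from a quaternion and a length-3 translation."""
    T = np.eye(4)
    T[:3, :3] = Rotation.from_quat([qx, qy, qz, qw]).as_matrix()
    T[:3, 3] = scale * np.asarray(translation)  # metres -> millimetres, as above
    return T
```
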
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/visualize_dae.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/visualize_dae.py
deleted file mode 100644
index 34969d6..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/visualize_dae.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import open3d as o3d
-import trimesh
-import numpy as np
-import matplotlib.pyplot as plt
-import math
-# Load the .dae file
-filename = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/meshes/panda/visual/link6.dae"
-mesh_scene = trimesh.load(filename)
-mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
-# Convert Trimesh to Open3D TriangleMesh
-vertices = o3d.utility.Vector3dVector(mesh.vertices)
-triangles = o3d.utility.Vector3iVector(mesh.faces)
-open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
-print(open3d_mesh)
-R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
-#open3d_mesh.rotate(R,[0,0,0])
-pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
-pcd_data = np.asarray(pcd.points)
-mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.001, origin=[0,0,0])
-#open3d_mesh.translate(np.array([0,0,0.03679997502]))
-t_matrix = np.array([[np.cos(-math.pi/4), -np.sin(-math.pi/4), 0.0,0],
- [np.sin(-math.pi/4), np.cos(-math.pi/4), 0.0,0],
- [0.0, 0.0, 1.0,0.03679997502],
- [0,0,0,1]])
-open3d_mesh.transform(t_matrix)
-pcd2 = open3d_mesh.sample_points_uniformly(number_of_points=200000)
-# Create an Open3D visualization window
-o3d.visualization.draw_geometries([pcd,mesh_coordinate_frame])
-print(np.min(pcd_data[:, 1]))
-exit()
-o3d_mesh = None
-pcds = []
-points = None
-#R = np.array([[1,0,0],[0,0,-1],[0,1,0]])
-#R2 = np.array([[-1,0,0],[0,-1,0],[0,0,1]])
-print(mesh)
-for item in mesh.geometry:
- o3d_mesh = mesh.geometry[item].as_open3d
- print(o3d_mesh.get_center() / 1000)
- pcd = o3d_mesh.sample_points_uniformly(number_of_points=100000)
- if points is None:
- points = np.asarray(pcd.points)
- else:
- points = np.concatenate((points,pcd.points),axis=0)
- pcds.append(pcd)
-new_pcd = o3d.geometry.PointCloud()
-new_pcd.points = o3d.utility.Vector3dVector(points)
-#new_pcd = new_pcd.rotate(R)
-#new_pcd = new_pcd.rotate(R2)
-new_pcd.points = o3d.utility.Vector3dVector(np.asarray(new_pcd.points) / 1000)
-#center_translation = -new_pcd.get_center()
-#new_pcd = new_pcd.translate(center_translation)
-#print(new_pcd.get_center())
-
-
-o3d.visualization.draw_geometries([new_pcd,mesh_coordinate_frame])
-
-# mesh = trimesh.load(collada_filepath)
-# #print(transform_matrix)
-# #mesh = mesh.apply_transform(transform_matrix)
-# vertices = None
-# for item in mesh.geometry:
-# if vertices is None:
-# vertices = mesh.geometry[item].vertices
-# else:
-# vertices = np.concatenate((vertices,mesh.geometry[item].vertices),axis=0)
-
-# # Create a visualization window
-# pcd = o3d.geometry.PointCloud()
-# pcd.points = o3d.utility.Vector3dVector(vertices)
-# R = np.array([[1,0,0],[0,0,-1],[0,1,0]])
-# R2 = np.array([[-1,0,0],[0,-1,0],[0,0,1]])
-# pcd = pcd.rotate(R)
-# pcd = pcd.rotate(R2)
-# mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
-# size=100, origin=[0,0,0])
-# o3d.visualization.draw_geometries([pcd,mesh_coordinate_frame])
-
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/visualize_mesh_scene_dae.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/visualize_mesh_scene_dae.py
deleted file mode 100644
index 7d3ac46..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/visualize_mesh_scene_dae.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import trimesh
-import open3d as o3d
-import numpy as np
-# Load the 3D scene from the DAE file using trimesh
-dae_file_path = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/meshes/panda/visual/link6.dae"
-scene = trimesh.load(dae_file_path)
-
-# Extract the geometry (triangular meshes) from the loaded scene
-meshes = scene.geometry
-for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.011791708069958752]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
-#meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
-mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-#vertices = o3d.utility.Vector3dVector(mesh.vertices)
-#triangles = o3d.utility.Vector3iVector(mesh.faces)
-#open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
-# import pdb
-# pdb.set_trace()
-vertices = o3d.utility.Vector3dVector(mesh.vertices)
-triangles = o3d.utility.Vector3iVector(mesh.faces)
-open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
-# open3d_mesh.translate(np.array([0,0,-0.011791708069958752]))
-pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
-mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1,origin=[0,0,0])
-# # Visualize the scene using Open3D
-# print(np.min(pcd_data[:, 2]) - np.min(pcd2_data[:, 2]))
-# print(np.max(pcd2_data[:, 2]) - np.min(pcd2_data[:, 2]))
-
-o3d.visualization.draw_geometries([pcd,mesh_coordinate_frame])
-# 'Shell012_000-mesh'
-# ('Shell006_000-mesh', )])
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node.py
deleted file mode 100644
index 3be8273..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node.py
+++ /dev/null
@@ -1,485 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFiles, InputFilesData
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.panda_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_arm_hand_only_ik.urdf"
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"world","panda_hand")
- self.ur5e_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik.urdf"
- self.chain_ = kp.build_chain_from_urdf(open("/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_nvidia_with_gripper_solo_ik.urdf").read())
- self.ur5e_solver_ = TracIKSolver(self.ur5e_urdf_,"world","tool0")
- self.camera_to_base_link_ = np.array([[-0.707,0.707,0,0],
- [0.408 , 0.408, -0.816, 0],
- [-0.577, -0.577 ,-0.577 , 2598],
- [0,0,0,1]])
- self.ur5e_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.ur5e_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.subscription_ = self.create_subscription(
- InputFiles,
- 'input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesData,
- 'input_files_data',
- self.listenerCallbackData,
- 1)
- self.joint_state_msg = JointState()
- self.q_init_ = np.zeros(self.ur5e_solver_.number_of_joints)
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","ur5e_nvidia_with_gripper_solo.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
-
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- self.original_image_ = None
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 10
- )
- self.camera_raw_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.imageRawCallback,
- 10
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",10)
- self.just_robot_image_publisher_ = self.create_publisher(Image,"plain_ur5_image",10)
-
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
-
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",10)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",10)
- self.is_ready_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def imageRawCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
-
- def prelimMeshFast(self,filename):
-        # URDF rpy is fixed-axis roll-pitch-yaw; eulerToR composes it as Rz @ Ry @ Rx
- mesh_scene = trimesh.load(filename)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=500)
- test_pcd_data = np.asarray(test_pcd.points)
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- return open3d_mesh
-
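
The unit heuristic in `prelimMeshFast` samples the mesh, takes the largest bounding-box extent, and treats anything under 1 unit as meters to be scaled into millimeters; otherwise it applies an axis fix-up rotation. A standalone sketch of that check with open3d, using a generated box as a stand-in mesh (the rotation matrix is copied from the node; its axis convention is an assumption of the original code):

```
import numpy as np
import open3d as o3d

mesh = o3d.geometry.TriangleMesh.create_box(1.0, 1.0, 1.0)  # stand-in mesh
pts = np.asarray(mesh.sample_points_uniformly(number_of_points=500).points)
extent = (pts.max(axis=0) - pts.min(axis=0)).max()

if extent < 1:
    # Largest dimension under 1 unit: assume meters, rescale to millimeters
    mesh.vertices = o3d.utility.Vector3dVector(np.asarray(mesh.vertices) * 1000)
else:
    # Otherwise apply the node's axis fix-up (swap Y/Z, flip X)
    R = np.array([[-1, 0, 0], [0, 0, 1], [0, 1, 0]])
    mesh.rotate(R, center=np.array([0.0, 0.0, 0.0]))
```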
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- msg_data_homogenous = np.hstack((msg_data,np.ones((msg_data.shape[0],1))))
- pixel_data_homogenous = np.dot(self.camera_intrinsic_matrix_,msg_data_homogenous.T).T
- pixel_data = pixel_data_homogenous[:,:2] / pixel_data_homogenous[:,2:]
- return pixel_data, depth_data
-
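
`getPixels` is a plain pinhole projection: homogenize the camera-frame points, multiply by the 3x4 intrinsic matrix, and divide by depth. A minimal self-contained version with made-up intrinsics (fx, fy, cx, cy here are illustrative, not the node's calibration):

```
import numpy as np

# Illustrative 3x4 intrinsic matrix; the values are assumptions
K = np.array([[525.0, 0.0, 320.0, 0.0],
              [0.0, 525.0, 240.0, 0.0],
              [0.0, 0.0, 1.0, 0.0]])

points = np.array([[0.10, -0.05, 1.2],   # camera-frame XYZ in meters
                   [0.00,  0.02, 0.8]])
depth = points[:, 2]

points_h = np.hstack((points, np.ones((points.shape[0], 1))))
pixels_h = (K @ points_h.T).T
pixels = pixels_h[:, :2] / pixels_h[:, 2:]  # perspective divide
print(pixels, depth)
```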
- def inpainting(self,rgbd_file,seg_file,gazebo_rgb,gazebo_depth):
-
- # TODO(kush): Clean this up to use actual data, not file names
- if type(rgbd_file) == str:
- rgbd = np.load(rgbd_file)
- seg = cv2.imread(seg_file,0)
- else:
- rgbd = rgbd_file
- seg = seg_file
- seg = seg.astype(np.uint8)
-
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- color_image = rgbd[:,:,0:3]
- color_image = cv2.cvtColor(color_image,cv2.COLOR_BGR2RGB)
- depth_image = rgbd[:,:,-1]
- depth_image = depth_image[:,:,np.newaxis]
- rgbd_image = np.concatenate((color_image,depth_image),axis=2)
- inverted_mask = cv2.bitwise_not(seg)
- masked_rgbd_image = cv2.bitwise_and(rgbd_image,rgbd_image,mask=inverted_mask)
- masked_rgb_image = masked_rgbd_image[:,:,0:3]
- masked_depth_image = masked_rgbd_image[:,:,-1]
-
- joined_depth = np.concatenate((gazebo_depth[np.newaxis],masked_depth_image[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- attempt = masked_rgb_image * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
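
The core of `inpainting` above is a two-layer z-buffer: the masked real depth and the Gazebo depth are stacked, zeros are pushed to 1000 so missing depth never wins, and argmin picks the closer layer per pixel. A toy reproduction of that blend:

```
import numpy as np

h, w = 4, 4
real_rgb = np.full((h, w, 3), 200.0)
real_depth = np.full((h, w), 2.0)
gazebo_rgb = np.full((h, w, 3), 50.0)
gazebo_depth = np.full((h, w), 3.0)
gazebo_depth[:2, :] = 1.0               # rendered robot closer in the top half

joined = np.stack((gazebo_depth, real_depth), axis=0)
joined[joined == 0] = 1000              # missing depth counts as far away
nearest = np.argmin(joined, axis=0)     # 0 -> Gazebo wins, 1 -> real wins

blend = real_rgb * nearest[:, :, None] + gazebo_rgb * (1 - nearest)[:, :, None]
print(blend[:, :, 0])                   # 50 in the top rows, 200 below
```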
- def fullPointcloudCallback(self,msg,depth_file,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_, dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- for coord in all_pixels:
- x,y = coord
-                # TODO: remove the hardcoded 5-pixel x-offset applied when writing mask pixels
- mask_image[round(y),round(x)-5] = white_color
- depth_image[round(y),round(x)-5] = depth_data[i]
- i += 1
- np.save('/home/benchturtle/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
-
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/benchturtle/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/benchturtle/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(depth_file,segmentation,gazebo_masked_image,depth_image)
- print("I am the Senate")
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
-            # URDF rpy is fixed-axis roll-pitch-yaw; eulerToR composes it as Rz @ Ry @ Rx
- mesh_scene = trimesh.load(filename)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=500)
- test_pcd_data = np.asarray(test_pcd.points)
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- #open3d_mesh = fast_mesh
- start_time = time.time()
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- open3d_mesh.rotate(R2,[0,0,0])
- open3d_mesh.translate(xyz_np)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_base_link_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
-                # TODO: handle the failed transform lookup (log and retry with a timeout) instead of silently passing
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
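
`transformStampedToMatrix` expands the quaternion by hand and scales the translation to millimeters to match the mesh units. For reference, the same 4x4 can be built with scipy (the quaternion below is arbitrary):

```
import numpy as np
from scipy.spatial.transform import Rotation as R

# 90-degree rotation about Z plus a translation given in meters
qx, qy, qz, qw = 0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)
translation = np.array([0.1, 0.2, 0.3])

T = np.eye(4)
T[:3, :3] = R.from_quat([qx, qy, qz, qw]).as_matrix()  # scipy order: x, y, z, w
T[:3, 3] = translation * 1000                          # node convention: mm
print(np.round(T, 3))
```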
- def setupMeshes(self,depth_file,segmentation):
- if(self.is_ready_):
- print("HERE")
- open3d_mesh = None
- start_time = time.time()
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- o3d.io.write_point_cloud('/home/benchturtle/ur5e.pcd',pcd)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "camera_color_optical_frame"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,depth_file,segmentation)
-
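
`setupMeshes` packs the sampled points into a PointCloud2 by hand (three float32 fields, 12-byte point step). In a ROS 2 environment, sensor_msgs_py can build the equivalent message; a sketch, assuming the same frame id:

```
import numpy as np
from std_msgs.msg import Header
from sensor_msgs_py import point_cloud2

pcd_data = np.random.rand(1000, 3).astype(np.float32)  # stand-in points, meters

header = Header()
header.frame_id = "camera_color_optical_frame"

# Fills the x/y/z float32 fields and step sizes, like the hand-rolled version
cloud_msg = point_cloud2.create_cloud_xyz32(header, pcd_data)
```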
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
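
`eulerToR` composes Rz @ Ry @ Rx, which is the matrix form of URDF's fixed-axis roll-pitch-yaw. A quick consistency check against scipy's extrinsic 'xyz' Euler convention (the sample angles are arbitrary):

```
import numpy as np
from scipy.spatial.transform import Rotation as R

def euler_to_r(rpy):
    rx, ry, rz = rpy
    Rx = np.array([[1, 0, 0], [0, np.cos(rx), -np.sin(rx)], [0, np.sin(rx), np.cos(rx)]])
    Ry = np.array([[np.cos(ry), 0, np.sin(ry)], [0, 1, 0], [-np.sin(ry), 0, np.cos(ry)]])
    Rz = np.array([[np.cos(rz), -np.sin(rz), 0], [np.sin(rz), np.cos(rz), 0], [0, 0, 1]])
    return Rz @ Ry @ Rx

rpy = np.array([0.1, -0.4, 0.7])  # roll, pitch, yaw in radians
# Lowercase 'xyz' in scipy is extrinsic, i.e. Rz(yaw) @ Ry(pitch) @ Rx(roll)
assert np.allclose(euler_to_r(rpy), R.from_euler('xyz', rpy).as_matrix())
```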
- def listenerCallback(self,msg):
- if self.is_ready_:
- start_time = time.time()
- depth_file = msg.depth_file
- segmentation = msg.segmentation
- joints = msg.joints
- f = open(joints)
- joint_array = f.read().split(' ')
- joint_array = [float(item) for item in joint_array ]
-
- f.close()
- ee_pose = self.panda_solver_.fk(np.array(joint_array))
- qout = self.ur5e_solver_.ik(ee_pose,qinit=np.zeros(self.ur5e_solver_.number_of_joints))#self.q_init_)
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- qout_list.append(0.0)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5e_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(depth_file,segmentation)
- end_time = time.time()
- print("Total time: " + str(end_time - start_time) + " s")
-
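
`listenerCallback` is the cross-embodiment retargeting step: the Panda joints go through Panda FK, and the resulting end-effector pose is handed to the UR5e IK solver. A condensed sketch of that round trip with tracikpy (the URDF paths and link names are placeholders, not the repo's actual files):

```
import numpy as np
from tracikpy import TracIKSolver

# Placeholder URDFs; substitute the description files from gazebo_env
panda = TracIKSolver("panda_arm_hand_only_ik.urdf", "world", "panda_hand")
ur5e = TracIKSolver("ur5e_with_gripper_ik.urdf", "world", "tool0")

panda_joints = np.zeros(panda.number_of_joints)
ee_pose = panda.fk(panda_joints)  # 4x4 homogeneous end-effector pose
qout = ur5e.ik(ee_pose, qinit=np.zeros(ur5e.number_of_joints))
if qout is None:
    print("UR5e cannot reach this Panda end-effector pose")
```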
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- depth_data = msg.depth_data
-
- segmentation_data = msg.segmentation_data
- depth_data_reshaped = np.array(depth_data) \
- .reshape((segmentation_data.width, segmentation_data.height, 4))
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- joints = msg.joints
- joint_array = joints
-
- ee_pose = self.panda_solver_.fk(np.array(joint_array))
- qout = self.ur5e_solver_.ik(ee_pose,qinit=np.zeros(self.ur5e_solver_.number_of_joints))#self.q_init_)
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- qout_list.append(0.0)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5e_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(depth_data_reshaped, segmentation_data)
- end_time = time.time()
- print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- # Fill the JointState message with data from the Float64MultiArray
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
-        self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"]
-        # Mirror the commanded knuckle angle across the Robotiq 85 linkage (mimic joints)
-        gripper_val = msg.data[6]
- msg.data.append(gripper_val)
- msg.data.append(gripper_val)
- msg.data.append(gripper_val)
- msg.data.append(-gripper_val)
- msg.data.append(-gripper_val)
- self.joint_state_msg.position = msg.data
-
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.ur5e_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_early_inpaint.py
similarity index 85%
rename from mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better.py
rename to mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_early_inpaint.py
index 0a63f15..27a3b1e 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_early_inpaint.py
@@ -3,7 +3,6 @@
import rclpy
from rclpy.node import Node
import trimesh
-import open3d as o3d
from std_msgs.msg import Header, Float64MultiArray,Bool
from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
import numpy as np
@@ -28,7 +27,6 @@
from tracikpy import TracIKSolver
from mdh.kinematic_chain import KinematicChain
from mdh import UnReachable
-import kinpy as kp
from geometry_msgs.msg import Vector3, Quaternion
from scipy.spatial.transform import Rotation as R
from message_filters import ApproximateTimeSynchronizer, Subscriber
@@ -46,7 +44,6 @@ def __init__(self):
self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
# real_camera_link to world and then multiply translation by 1000
@@ -81,31 +78,14 @@ def __init__(self):
1)
self.subscription_data_ = self.create_subscription(
InputFilesRealDataMulti,
- 'input_files_data_real_multi',
+ 'input_files_data_real',
self.listenerCallbackOnlineDebug,
1)
self.joint_state_msg = JointState()
         #Hardcoding start position
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
+ self.q_init_ = np.array([-0.5489533543586731, 0.07463289797306061, 0.5471758842468262, -2.2465689182281494, -0.12455657869577408, 2.2839372158050537, 0.7166903018951416])
+
self.publishers_ = []
self.subscribers_ = []
self.timers_ = []
@@ -168,7 +148,7 @@ def __init__(self):
self.pandaLeftDepthCallback,
1
)
-
+ self.first_time_ = True
self.ur5_right_rgb_ = None
self.ur5_right_depth_ = None
self.panda_right_rgb_ = None
@@ -201,6 +181,38 @@ def __init__(self):
1
)
+ self.panda_left_no_gripper_rgb_ = None
+ self.panda_left_no_gripper_depth_ = None
+ self.panda_right_no_gripper_rgb_ = None
+ self.panda_right_no_gripper_depth_ = None
+ self.panda_no_gripper_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/image_raw',
+ self.pandaLeftNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/depth/image_raw',
+ self.pandaLeftNoGripperDepthCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/image_raw',
+ self.pandaRightNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/depth/image_raw',
+ self.pandaRightNoGripperDepthCallback,
+ 1
+ )
+
self.joint_subscription_ = self.create_subscription(
JointState,
'/gazebo_joint_states',
@@ -213,37 +225,10 @@ def __init__(self):
timer_period = 0.5
self.links_info_ = []
self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
#self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
self.updated_joints_ = False
self.is_ready_ = True
@@ -271,22 +256,48 @@ def pandaRightRgbCallback(self,msg):
def pandaRightDepthCallback(self,msg):
self.panda_right_depth_ = msg
+ def pandaLeftNoGripperRgbCallback(self,msg):
+ self.panda_left_no_gripper_rgb_ = msg
+
+ def pandaLeftNoGripperDepthCallback(self,msg):
+ self.panda_left_no_gripper_depth_ = msg
+
+ def pandaRightNoGripperRgbCallback(self,msg):
+ self.panda_right_no_gripper_rgb_ = msg
+
+ def pandaRightNoGripperDepthCallback(self,msg):
+ self.panda_right_no_gripper_depth_ = msg
+
def noTimeGazeboCallback(self,joint_msg):
start_time = time.time()
- left_inpainted_image = self.doFullInpainting(self.panda_left_rgb_,self.panda_left_depth_,self.ur5_left_rgb_,self.ur5_left_depth_)
- right_inpainted_image = self.doFullInpainting(self.panda_right_rgb_,self.panda_right_depth_,self.ur5_right_rgb_,self.ur5_right_depth_)
+ left_inpainted_image,left_mask = self.doFullInpainting(self.panda_left_rgb_,self.panda_left_depth_,self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_no_gripper_depth_,self.left_real_rgb_,self.left_real_depth_)
+ right_inpainted_image,right_mask = self.doFullInpainting(self.panda_right_rgb_,self.panda_right_depth_,self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_no_gripper_depth_,self.right_real_rgb_,self.right_real_depth_)
inpainted_image_msg = MultipleInpaintImages()
- inpainted_image_msg.images = [left_inpainted_image,right_inpainted_image]
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(left_mask),self.cv_bridge_.cv2_to_imgmsg(right_mask)]
+ mask_image_msg.images = []
self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
inpaint_number = str(self.i_).zfill(5)
if not os.path.exists('left_inpainting'):
os.makedirs('left_inpainting')
if not os.path.exists('right_inpainting'):
os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
- def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,gazebo_no_gripper_depth,world_rgb,world_depth):
real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
@@ -296,6 +307,7 @@ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+ gazebo_no_gripper_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_no_gripper_depth)
real_seg_np = (real_depth_np < 8).astype(np.uint8)
real_seg_255_np = 255 * real_seg_np
@@ -303,11 +315,14 @@ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
gazebo_seg_255_np = 255 * gazebo_seg_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
+ real_no_gripper_seg_255_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
+ real_no_gripper_seg_255_np = 255 * real_no_gripper_seg_255_np
+ cv2.imwrite('gazebo_seg_no_gripper.png',real_no_gripper_seg_255_np)
+ real_rgb = world_rgb
+ real_depth = world_depth
real_seg = real_seg_255_np
gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- return self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
+ return self.dummyInpaintingEarly(real_rgb,real_seg,real_no_gripper_seg_255_np,gazebo_rgb_np,gazebo_seg_255_np)
def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
return
@@ -528,6 +543,62 @@ def convertPointcloudToDepth(self,pointcloud):
depth_image[pixel[1],pixel[0]] = point[2]
return depth_image
+ def dummyInpaintingEarly(self,rgb,seg_file,seg_file_no_gripper,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file_no_gripper = cv2.threshold(seg_file_no_gripper, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('seg_file.png',seg_file)
+ cv2.imwrite('seg_file_no_gripper.png',seg_file_no_gripper)
+ seg_file_gripper = abs(seg_file - seg_file_no_gripper)
+ seg_file_gripper = cv2.erode(seg_file_gripper,np.ones((3,3),np.uint8),iterations=3)
+ seg_file_gripper = cv2.dilate(seg_file_gripper,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('seg_file_gripper.png',seg_file_gripper)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file_no_gripper)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=15)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file_gripper
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
+
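
`dummyInpaintingEarly` isolates the gripper by differencing the full-robot and no-gripper masks, then erodes away speckle and dilates to recover area before compositing. The mask arithmetic in isolation, on synthetic masks (cv2.absdiff stands in for the abs() subtraction used above):

```
import cv2
import numpy as np

seg_full = np.zeros((64, 64), np.uint8)
seg_full[10:50, 20:40] = 255          # arm plus gripper
seg_no_gripper = np.zeros((64, 64), np.uint8)
seg_no_gripper[10:40, 20:40] = 255    # arm only

gripper = cv2.absdiff(seg_full, seg_no_gripper)  # gripper region only
gripper = cv2.erode(gripper, np.ones((3, 3), np.uint8), iterations=3)
gripper = cv2.dilate(gripper, np.ones((3, 3), np.uint8), iterations=10)
print(int(gripper.sum() // 255), "gripper pixels after cleanup")
```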
def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
if(rgb.dtype == np.float32):
@@ -540,7 +611,7 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
gazebo_segmentation_mask_255 = gazebo_seg
inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=15)
cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
@@ -1081,13 +1152,18 @@ def listenerCallbackOnlineDebug(self,msg):
os.makedirs(online_input_num_folder)
left_msg = msg.data_pieces[0]
right_msg = msg.data_pieces[1]
-
+ elif(len(msg.data_pieces) == 1):
+ left_msg = msg.data_pieces[0]
+
left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
# rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
left_depth_np[np.isnan(left_depth_np)] = 0
+ if(left_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
# rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
@@ -1095,36 +1171,58 @@ def listenerCallbackOnlineDebug(self,msg):
right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
right_depth_np[np.isnan(right_depth_np)] = 0
+ if(right_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
- panda_panda_gripper_joints = np.array(msg.joints)
+ panda_panda_gripper_joints = np.array(msg.data_pieces[0].joints)
+ real_ee_gripper_joint = panda_panda_gripper_joints[-1]
+
panda_panda_gripper_joints = panda_panda_gripper_joints[:-1]
ee_pose = self.panda_panda_gripper_solver_.fk(panda_panda_gripper_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
+ end_effector_rotation_with_no_translation = np.array([[0,1,0,0],
+ [-1,0,0,0],
+ [0,0,1,0],
+ [0,0,0,1]])
ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ #end_effector_rotation_with_no_translation = np.linalg.inv(np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]]))
+ #ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ if(self.first_time_):
+ self.q_init_ = panda_panda_gripper_joints
+ qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+
+ #ee_pose = ee_pose @ end_effector_rotation_with_no_translation
scipy_rotation = R.from_matrix(ee_pose[:3,:3])
scipy_quaternion = scipy_rotation.as_quat()
+ if(self.first_time_):
+ self.q_init_ = panda_panda_gripper_joints
+ self.first_time_ = False
qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
b_xyz = 1e-5
b_rpy = 1e-3
while(qout is None):
b_xyz *= 10
b_rpy *= 10
- qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz == 0.01):
+ qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=panda_panda_gripper_joints,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+ if(b_xyz == 0.1):
print("Couldn't find good IK")
- qout = self.q_init_
+ qout = panda_panda_gripper_joints
print("Bound xyz: " + str(b_xyz))
self.q_init_ = qout
panda_arm_joints = panda_panda_gripper_joints.tolist()
- panda_gripper_joint = 0.04
+ panda_gripper_joint = -0.04*real_ee_gripper_joint + 0.04
panda_gripper_joints = [panda_gripper_joint] * 2
panda_ur5_arm_joints = qout.tolist()
- panda_ur5_gripper_joint = 0.0
+ panda_ur5_gripper_joint = -20 * panda_gripper_joint + 0.8
panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
full_joint_list = []
full_joint_list.extend(panda_arm_joints)
full_joint_list.extend(panda_ur5_arm_joints)
+ full_joint_list.extend(panda_gripper_joints)
+ full_joint_list.extend(panda_ur5_gripper_joints)
qout_msg = Float64MultiArray()
qout_msg.data = full_joint_list
self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
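
The IK fallback introduced in this hunk relaxes the TracIK tolerances tenfold per retry and gives up once the position bound reaches 0.1, falling back to the seed joints. The pattern, extracted as a standalone helper (solver construction is elided; `solver` is any TracIKSolver):

```
import numpy as np

def ik_with_relaxing_bounds(solver, ee_pose, qinit):
    """Retry IK, widening position/orientation tolerances tenfold per failure."""
    qout = solver.ik(ee_pose, qinit=qinit)
    b_xyz, b_rpy = 1e-5, 1e-3
    while qout is None:
        b_xyz *= 10
        b_rpy *= 10
        qout = solver.ik(ee_pose, qinit=qinit,
                         bx=b_xyz, by=b_xyz, bz=b_xyz,
                         brx=b_rpy, bry=b_rpy, brz=b_rpy)
        if b_xyz >= 0.1:  # tolerance cap reached: give up, keep the seed
            print("Couldn't find good IK")
            return qinit
    return qout
```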
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_early_inpaint_reproject.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_early_inpaint_reproject.py
new file mode 100644
index 0000000..c0af35b
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_early_inpaint_reproject.py
@@ -0,0 +1,1443 @@
+#!/usr/bin/env python3
+
+import rclpy
+from rclpy.node import Node
+from std_msgs.msg import Header, Float64MultiArray,Bool
+from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
+import numpy as np
+from launch_ros.substitutions import FindPackageShare
+from launch.substitutions import PathJoinSubstitution
+import os
+import glob
+import subprocess
+import xml.etree.ElementTree as ET
+from functools import partial
+import math
+from message_filters import TimeSynchronizer, Subscriber
+from tf2_ros import TransformException
+from tf2_ros.buffer import Buffer
+from tf2_ros.transform_listener import TransformListener
+from sensor_msgs_py import point_cloud2
+import cv2
+from cv_bridge import CvBridge
+import time
+from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
+from sensor_msgs.msg import JointState
+from tracikpy import TracIKSolver
+from mdh.kinematic_chain import KinematicChain
+from mdh import UnReachable
+from geometry_msgs.msg import Vector3, Quaternion
+from scipy.spatial.transform import Rotation as R
+from message_filters import ApproximateTimeSynchronizer, Subscriber
+import pathlib
+
+
+class WriteData(Node):
+ def __init__(self):
+ super().__init__('write_data_node')
+ self.is_ready_ = False
+ self.thetas_ = None
+ self.debug_ = True
+ self.float_image_ = False
+ file_directory = pathlib.Path(__file__).parent.resolve()
+ self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
+ self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
+ self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
+ self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
+
+ # real_camera_link to world and then multiply translation by 1000
+ # self.camera_to_world_ = np.array([[0,1,0,0],
+ # [0.258,0,-0.966,989],
+ # [-0.966,0,-0.258,1919],
+ # [0,0,0,1]])
+ self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
+ [0.706, 0.000 ,-0.708, 603],
+ [-0.708, 0.000, -0.706 , 1307],
+ [0,0,0,1]])
+
+ self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
+ [ 0. , 101.39696962, 42. ],
+ [ 0. , 0. , 1. ]])
+ self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
+ self.panda_joint_state_publisher_ = self.create_publisher(
+ JointState,
+ '/joint_states', # Topic name for /joint_states
+ 1)
+ self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
+ self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
+ self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
+ self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
+ self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
+ self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
+ self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
+ self.subscription_ = self.create_subscription(
+ InputFilesRobosuite,
+ '/input_files',
+ self.listenerCallback,
+ 1)
+ self.subscription_data_ = self.create_subscription(
+ InputFilesRealDataMulti,
+ 'input_files_data_real',
+ self.listenerCallbackOnlineDebug,
+ 1)
+ self.joint_state_msg = JointState()
+
+        # Hardcoding start position
+ self.q_init_ = np.array([-0.5489533543586731, 0.07463289797306061, 0.5471758842468262, -2.2465689182281494, -0.12455657869577408, 2.2839372158050537, 0.7166903018951416])
+
+ self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
+ xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
+ xacro_subprocess = subprocess.Popen(
+ xacro_command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ )
+ urdf_string = ""
+ while True:
+ line = xacro_subprocess.stdout.readline()
+ if line:
+ line_byte = line.strip()
+ line = line_byte.decode("utf-8")
+ urdf_string += line
+ else:
+ break
+ root = ET.fromstring(urdf_string)
+ self.publishers_ = []
+ self.subscribers_ = []
+ self.timers_ = []
+ self.left_real_rgb_ = None
+ self.right_real_rgb_ = None
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
+ self.real_seg_ = None
+ self.real_qout_list_ = None
+ self.i_ = -1
+ self.offline_ = False
+ self.seg_file_ = None
+ self.tf_buffer_ = Buffer()
+ self.tf_listener_ = TransformListener(self.tf_buffer_, self)
+ self.camera_intrinsic_matrix_ = None
+ self.intrinsic_matrix_ = np.array([[524.22595215, 0. , 639.77819824],
+ [ 0. , 524.22595215, 370.27804565],
+ [0,0,1]])
+ self.image_shape_ = None
+        # NOTE: the robosuite image shape below is hardcoded and must match the env's camera size
+ self.robosuite_image_shape_ = (84,84)
+ self.camera_intrinsic_subscription_ = self.create_subscription(
+ CameraInfo,
+ '/camera/camera_info',
+ self.cameraInfoCallback,
+ 1
+ )
+ self.camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/camera/image_raw',
+ self.cameraCallback,
+ 1
+ )
+
+ self.ur5_left_rgb_ = None
+ self.ur5_left_depth_ = None
+ self.panda_left_rgb_ = None
+ self.panda_left_depth_ = None
+ self.ur5_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/image_raw',
+ self.ur5LeftRgbCallback,
+ 1
+ )
+
+ self.ur5_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/depth/image_raw',
+ self.ur5LeftDepthCallback,
+ 1
+ )
+
+ self.panda_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_left_camera/image_raw',
+ self.pandaLeftRgbCallback,
+ 1
+ )
+
+ self.panda_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_left_camera/depth/image_raw',
+ self.pandaLeftDepthCallback,
+ 1
+ )
+ self.first_time_ = True
+ self.ur5_right_rgb_ = None
+ self.ur5_right_depth_ = None
+ self.panda_right_rgb_ = None
+ self.panda_right_depth_ = None
+ self.ur5_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/image_raw',
+ self.ur5RightRgbCallback,
+ 1
+ )
+
+ self.ur5_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/depth/image_raw',
+ self.ur5RightDepthCallback,
+ 1
+ )
+
+ self.panda_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/image_raw',
+ self.pandaRightRgbCallback,
+ 1
+ )
+
+ self.panda_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/depth/image_raw',
+ self.pandaRightDepthCallback,
+ 1
+ )
+
+ self.panda_left_no_gripper_rgb_ = None
+ self.panda_left_no_gripper_depth_ = None
+ self.panda_right_no_gripper_rgb_ = None
+ self.panda_right_no_gripper_depth_ = None
+ self.panda_no_gripper_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/image_raw',
+ self.pandaLeftNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/depth/image_raw',
+ self.pandaLeftNoGripperDepthCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/image_raw',
+ self.pandaRightNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/depth/image_raw',
+ self.pandaRightNoGripperDepthCallback,
+ 1
+ )
+
+ self.joint_subscription_ = self.create_subscription(
+ JointState,
+ '/gazebo_joint_states',
+ self.noTimeGazeboCallback,
+ 1
+ )
+ self.cv_bridge_ = CvBridge()
+ self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
+ self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
+ timer_period = 0.5
+ self.links_info_ = []
+ self.original_meshes_ = []
+
+ self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
+ self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
+ #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
+ self.updated_joints_ = False
+ self.is_ready_ = True
+
+ def ur5LeftRgbCallback(self,msg):
+ self.ur5_left_rgb_ = msg
+
+ def ur5LeftDepthCallback(self,msg):
+ self.ur5_left_depth_ = msg
+
+ def pandaLeftRgbCallback(self,msg):
+ self.panda_left_rgb_ = msg
+
+ def pandaLeftDepthCallback(self,msg):
+ self.panda_left_depth_ = msg
+
+ def ur5RightRgbCallback(self,msg):
+ self.ur5_right_rgb_ = msg
+
+ def ur5RightDepthCallback(self,msg):
+ self.ur5_right_depth_ = msg
+
+ def pandaRightRgbCallback(self,msg):
+ self.panda_right_rgb_ = msg
+
+ def pandaRightDepthCallback(self,msg):
+ self.panda_right_depth_ = msg
+
+ def pandaLeftNoGripperRgbCallback(self,msg):
+ self.panda_left_no_gripper_rgb_ = msg
+
+ def pandaLeftNoGripperDepthCallback(self,msg):
+ self.panda_left_no_gripper_depth_ = msg
+
+ def pandaRightNoGripperRgbCallback(self,msg):
+ self.panda_right_no_gripper_rgb_ = msg
+
+ def pandaRightNoGripperDepthCallback(self,msg):
+ self.panda_right_no_gripper_depth_ = msg
+
+ def reproject(self,left_real_rgb,left_real_depth,reproject_tf):
+ rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_real_rgb)
+ depth_np = left_real_depth
+ depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
+ depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
+ depth_np[np.isnan(depth_np)] = 0.0001
+ fx = self.intrinsic_matrix_[0][0]
+ fy = self.intrinsic_matrix_[1][1]
+ cx = self.intrinsic_matrix_[0][2]
+ cy = self.intrinsic_matrix_[1][2]
+ u = np.tile(np.arange(rgb_np.shape[1]), (rgb_np.shape[0], 1))
+ v = np.tile(np.arange(rgb_np.shape[0]),(rgb_np.shape[1],1)).T
+ d = depth_np
+ z = d
+ x = (u - cx) * z / fx
+ y = (v - cy) * z / fy
+ xyz_np = np.stack([x,y,z],axis=-1)
+ keys = xyz_np.reshape(-1,3)
+ values = rgb_np.reshape(-1,3)
+ data_dict = dict(zip(map(tuple,keys),map(tuple,values)))
+ points = np.array([x.reshape(-1),y.reshape(-1),z.reshape(-1)]).T
+ homogenous_points = np.column_stack((points, np.ones(len(points))))
+ transformed_points = np.dot(reproject_tf, homogenous_points.T).T[:, :3]
+ point_dict = dict(zip(map(tuple,transformed_points),map(tuple,homogenous_points)))
+ new_image_mask = np.zeros((left_real_rgb.height,left_real_rgb.width)).astype(np.uint8)
+ new_image = np.zeros((left_real_rgb.height,left_real_rgb.width,3)).astype(np.uint8)
+ new_depth = np.zeros((left_real_rgb.height,left_real_rgb.width)).astype(np.float64)
+ new_fx = fx
+ new_fy = fy
+ new_cx = cx
+ new_cy = cy
+ # Vectorized computation of coordinates
+ u_coords = np.round((new_fx * transformed_points[:, 0] / transformed_points[:, 2]) + new_cx).astype(int)
+ v_coords = np.round((new_fy * transformed_points[:, 1] / transformed_points[:, 2]) + new_cy).astype(int)
+
+ valid_coords_mask = np.logical_and.reduce([
+ (0 <= u_coords), (u_coords < left_real_rgb.width),
+ (0 <= v_coords), (v_coords < left_real_rgb.height),
+ ~np.isnan(transformed_points).any(axis=1)
+ ])
+
+ # Convert lists to tuples before using them as keys
+ point_keys = tuple(map(tuple, transformed_points[valid_coords_mask].tolist()))
+ data_values = np.array([data_dict[point_dict[key][:3]] for key in point_keys], dtype=np.uint8)
+ new_image_mask[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = 255
+ new_image[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = data_values
+ new_depth[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = transformed_points[:,2][valid_coords_mask]
+
+ #new_image_mask[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = 255
+ #new_image[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = data_dict[
+ # tuple(transformed_points[valid_coords_mask].astype(int).tolist())[:3]]
+
+ inverted_image_mask = cv2.bitwise_not(new_image_mask)
+ inpainted_image = cv2.inpaint(new_image,inverted_image_mask,1,cv2.INPAINT_TELEA)
+ cv2.imwrite('image_overlap.png',new_image_mask)
+ cv2.imwrite('image_overlap_color.png',new_image)
+ cv2.imwrite('image_overlap_inpaint.png',inpainted_image)
+ cv2.imwrite('normalized_depth_new.png',self.normalize_depth_image(new_depth))
+ return self.cv_bridge_.cv2_to_imgmsg(inpainted_image),new_depth,new_image
+
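
`reproject` back-projects every pixel through the intrinsics, moves the resulting points by `reproject_tf`, and projects them into the same camera, inpainting the holes left behind. The geometric core, on a synthetic depth map with assumed intrinsics:

```
import numpy as np

fx = fy = 500.0
cx = cy = 64.0                               # assumed intrinsics
depth = np.full((128, 128), 1.5)             # flat scene 1.5 m away
T = np.eye(4)
T[0, 3] = 0.05                               # virtual 5 cm sideways camera shift

v, u = np.mgrid[0:128, 0:128]
x = (u - cx) * depth / fx
y = (v - cy) * depth / fy
pts = np.stack([x, y, depth, np.ones_like(depth)], axis=-1).reshape(-1, 4)

moved = (T @ pts.T).T[:, :3]
u2 = np.round(fx * moved[:, 0] / moved[:, 2] + cx).astype(int)
v2 = np.round(fy * moved[:, 1] / moved[:, 2] + cy).astype(int)
valid = (0 <= u2) & (u2 < 128) & (0 <= v2) & (v2 < 128)
print(f"{valid.mean():.0%} of pixels land back inside the frame")
```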
+ def noTimeGazeboCallback(self,joint_msg):
+ start_time = time.time()
+ left_real_rgb = self.left_real_rgb_
+ left_real_depth = self.left_real_depth_
+ right_real_rgb = self.right_real_rgb_
+ right_real_depth = self.right_real_depth_
+ reproject_tf = np.array([[0.987,-0.136,0.083,-0.05],
+ [0.124,0.983,0.136,-0.05],
+ [-0.01,-0.124,0.987,0],
+ [0,0,0,1]])
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('reproject_images'):
+ os.makedirs('reproject_images')
+ if not os.path.exists('reproject_images/raw_rgb'):
+ os.makedirs('reproject_images/raw_rgb')
+ if not os.path.exists('reproject_images/reproject_once_'):
+ os.makedirs('reproject_images/reproject_once_')
+ if not os.path.exists('reproject_images/reproject_inpaint_once_'):
+ os.makedirs('reproject_images/reproject_inpaint_once_')
+ if not os.path.exists('reproject_images/reproject_twice_'):
+ os.makedirs('reproject_images/reproject_twice_')
+ if not os.path.exists('reproject_images/reproject_inpaint_twice_'):
+ os.makedirs('reproject_images/reproject_inpaint_twice_')
+ np.savetxt('reproject_images/camera_tf.txt',reproject_tf)
+ if(self.float_image_):
+ left_real_rgb = (self.cv_bridge_.imgmsg_to_cv2(left_real_rgb) * 255).astype(np.uint8)
+ cv2.imwrite('reproject_images/raw_rgb/raw_rgb'+ str(inpaint_number) +'.png',cv2.cvtColor(left_real_rgb,cv2.COLOR_BGR2RGB))
+ left_real_rgb = self.cv_bridge_.cv2_to_imgmsg(left_real_rgb)
+ left_real_rgb,left_real_depth,left_new_image = self.reproject(left_real_rgb,left_real_depth,reproject_tf)
+
+ first_reproject_straight_up = left_new_image
+ first_reproject_inpaint = self.cv_bridge_.imgmsg_to_cv2(left_real_rgb)
+ cv2.imwrite('reproject_images/reproject_once_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(left_new_image,cv2.COLOR_BGR2RGB))
+ cv2.imwrite('reproject_images/reproject_inpaint_once_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(self.cv_bridge_.imgmsg_to_cv2(left_real_rgb),cv2.COLOR_BGR2RGB))
+
+ left_real_rgb,left_real_depth,left_new_image = self.reproject(left_real_rgb,left_real_depth,np.linalg.inv(reproject_tf))
+ second_reproject_straight_up = left_new_image
+ second_reproject_inpaint = self.cv_bridge_.imgmsg_to_cv2(left_real_rgb)
+ cv2.imwrite('reproject_images/reproject_twice_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(left_new_image,cv2.COLOR_BGR2RGB))
+ cv2.imwrite('reproject_images/reproject_inpaint_twice_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(self.cv_bridge_.imgmsg_to_cv2(left_real_rgb),cv2.COLOR_BGR2RGB))
+ if(self.float_image_):
+ second_reproject_inpaint = (second_reproject_inpaint / 255).astype(np.float32)
+ second_reproject_straight_up = (second_reproject_straight_up / 255).astype(np.float32)
+ first_reproject_inpaint = (first_reproject_inpaint / 255).astype(np.float32)
+ first_reproject_straight_up = (first_reproject_straight_up / 255).astype(np.float32)
+ cv2.imwrite('try.png',(self.cv_bridge_.imgmsg_to_cv2(self.left_real_rgb_) * 255).astype(np.uint8))
+ self.left_real_rgb_ = self.cv_bridge_.cv2_to_imgmsg(second_reproject_inpaint)
+ cv2.imwrite('try2.png',(self.cv_bridge_.imgmsg_to_cv2(self.left_real_rgb_) * 255).astype(np.uint8))
+
+ left_inpainted_image,left_mask = self.doFullInpainting(self.panda_left_rgb_,self.panda_left_depth_,self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_no_gripper_depth_,self.left_real_rgb_,self.left_real_depth_)
+ right_inpainted_image,right_mask = self.doFullInpainting(self.panda_right_rgb_,self.panda_right_depth_,self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_no_gripper_depth_,self.right_real_rgb_,self.right_real_depth_)
+ inpainted_image_msg = MultipleInpaintImages()
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(second_reproject_inpaint),self.cv_bridge_.cv2_to_imgmsg(second_reproject_straight_up),self.cv_bridge_.cv2_to_imgmsg(first_reproject_inpaint),self.cv_bridge_.cv2_to_imgmsg(first_reproject_straight_up),self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image)]
+ mask_image_msg.images = []
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('left_inpainting'):
+ os.makedirs('left_inpainting')
+ if not os.path.exists('right_inpainting'):
+ os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
+ cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
+ cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
+
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,gazebo_no_gripper_depth,world_rgb,world_depth):
+ real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
+ real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
+ gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
+
+ gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
+ cv2.imwrite('real_rgb.png',real_rgb_np)
+ cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
+ cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
+ cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+ gazebo_no_gripper_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_no_gripper_depth)
+
+ real_seg_np = (real_depth_np < 8).astype(np.uint8)
+ real_seg_255_np = 255 * real_seg_np
+ gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
+ gazebo_seg_255_np = 255 * gazebo_seg_np
+ cv2.imwrite('real_seg.png',real_seg_255_np)
+ cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
+ real_no_gripper_seg_255_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
+ real_no_gripper_seg_255_np = 255 * real_no_gripper_seg_255_np
+ cv2.imwrite('gazebo_seg_no_gripper.png',real_no_gripper_seg_255_np)
+ real_rgb = world_rgb
+ real_depth = world_depth
+ real_seg = real_seg_255_np
+ gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
+ return self.dummyInpaintingEarly(real_rgb,real_seg,real_no_gripper_seg_255_np,gazebo_rgb_np,gazebo_seg_255_np)
+
+ def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
+ return
+ start_time = time.time()
+ real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
+ real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
+ gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
+
+ gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
+ cv2.imwrite('real_rgb.png',real_rgb_np)
+ cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
+ cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
+ cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+
+ real_seg_np = (real_depth_np < 8).astype(np.uint8)
+ real_seg_255_np = 255 * real_seg_np
+ gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
+ gazebo_seg_255_np = 255 * gazebo_seg_np
+ cv2.imwrite('real_seg.png',real_seg_255_np)
+ cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
+ real_rgb = self.real_rgb_
+ real_depth = self.real_depth_
+ real_seg = real_seg_255_np
+ gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
+ self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
+ self.updated_joints_ = False
+ end_time = time.time()
+ print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
+ return
+ #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
+
+ #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
+ # self.updated_joints_ = True
+
+ def cameraInfoCallback(self,msg):
+ if(self.is_ready_):
+ self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
+ self.image_shape_ = (msg.height,msg.width,3)
+
+ def cameraCallback(self,msg):
+ if(self.is_ready_):
+ self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
+ #cv2.imwrite('gazebo_rgb.png',self.original_image_)
+
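+ # Loads one link mesh, rescales it to millimeters when it appears to be
+ # authored in meters (extent < 1), applies a fixed axis-swap rotation
+ # otherwise, and bakes in the URDF visual-origin rpy/xyz (with hard-coded
+ # overrides for several Panda links) so the mesh sits in its link frame.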
+ def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
+ # URDF rpy is applied in ZYX order here (eulerToR builds R = Rz @ Ry @ Rx)
+ mesh_scene = trimesh.load(filename)
+ if(link_name == "panda_link6"):
+ meshes = mesh_scene.geometry
+ for mesh in meshes:
+ if(mesh != 'Shell006_000-mesh'):
+ vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
+ triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
+ meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+ #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+
+ mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
+ for g in mesh_scene.geometry.values()))
+ # Convert Trimesh to Open3D TriangleMesh
+ vertices = o3d.utility.Vector3dVector(mesh.vertices)
+ triangles = o3d.utility.Vector3iVector(mesh.faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ test_pcd_data = np.asarray(test_pcd.points)
+
+ diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
+ # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
+
+ if(diff < 1):
+ open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
+ else:
+ R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
+ open3d_mesh.rotate(R,[0,0,0])
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
+ #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
+ rpy_str_list = rpy_str.split()
+ rpy_floats = [float(x) for x in rpy_str_list]
+ rpy_np = np.array(rpy_floats)
+ xyz_str_list = xyz_str.split()
+ xyz_floats = [float(x) for x in xyz_str_list]
+ if(link_name == "panda_link1"):
+ xyz_floats = [0, 0, -0.1929999099]
+ elif(link_name == "panda_link3"):
+ xyz_floats = [0,0,-0.1219998142]
+ elif(link_name == "panda_link5"):
+ xyz_floats = [0,0,-0.2630000007]
+ elif(link_name == "panda_link7"):
+ rpy_np = np.array([0,0,-math.pi/4])
+ xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
+ xyz_np = 1000 * np.array(xyz_floats)
+ R2 = self.eulerToR(rpy_np)
+ # Create a 4x4 identity matrix
+ transform_matrix = np.eye(4)
+
+ # Replace the top-left 3x3 submatrix with the rotation matrix
+ transform_matrix[:3, :3] = R2
+
+ # Set the first three elements in the fourth column to the translation vector
+ transform_matrix[:3, 3] = xyz_np
+
+ # Set the bottom-right element to 1
+ transform_matrix[3, 3] = 1.0
+ open3d_mesh.transform(transform_matrix)
+ return open3d_mesh
+
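+ # Projects each point of the pointcloud message into pixel coordinates via
+ # the camera intrinsic matrix and returns the pixels with per-point depth.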
+ def getPixels(self,msg):
+ if(self.is_ready_):
+ msg_data = point_cloud2.read_points(msg)
+ msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
+ depth_data = msg_data[:,2]
+ camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
+ # image_shape_ is (height, width, 3); pass height before width to match
+ # the (camera_height, camera_width) parameter order of the projection helper
+ pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[0],self.image_shape_[1])
+ return pixel_data, depth_data
+
+ def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
+ """
+ Helper function to project a batch of points in the world frame
+ into camera pixels using the world to camera transformation.
+
+ Args:
+ points (np.array): 3D points in world frame to project onto camera pixel locations. Should
+ be shape [..., 3].
+ world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
+ coordinates.
+ camera_height (int): height of the camera image
+ camera_width (int): width of the camera image
+
+ Return:
+ pixels (np.array): projected pixel indices of shape [..., 2]
+ """
+ assert points.shape[-1] == 3 # last dimension must be 3D
+ assert len(world_to_camera_transform.shape) == 2
+ assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
+
+ # convert points to homogenous coordinates -> (px, py, pz, 1)
+ ones_pad = np.ones(points.shape[:-1] + (1,))
+ points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
+
+ # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
+ mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
+ cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
+ pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
+
+ # re-scaling from homogenous coordinates to recover pixel values
+ # (x, y, z) -> (x / z, y / z)
+ pixels = pixels / pixels[..., 2:3]
+ pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
+ # swap first and second coordinates to get pixel indices that correspond to (height, width)
+ # and also clip pixels that are out of range of the camera image
+ pixels = np.concatenate(
+ (
+ pixels[..., 1:2].clip(0, camera_height - 1),
+ pixels[..., 0:1].clip(0, camera_width - 1),
+ ),
+ axis=-1,
+ )
+ if(pixel_to_point_dicts):
+ points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
+ pixels_to_points_robosuite = {}
+ for key,value in points_to_pixels_robosuite.items():
+ pixels_to_points_robosuite.setdefault(value,[]).append(key)
+ return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
+ else:
+ return pixels
+
+ def normalize_depth_image(self,depth_image):
+ # Define the minimum and maximum depth values in your depth image
+ min_depth = np.min(depth_image)
+ max_depth = np.max(depth_image)
+
+ # Normalize the depth image to the range [0, 1]
+ normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
+
+ # Optionally, you can scale the normalized image to a different range
+ # For example, to scale it to [0, 255] for visualization:
+ normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
+ return normalized_depth_image
+
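+ # Applies the inverse of a hard-coded world/camera extrinsic to the
+ # pointcloud, then rasterizes it into a depth image using the robosuite
+ # pinhole intrinsics (u = x*fx/z + cx, v = y*fy/z + cy).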
+ def convertPointcloudToDepth(self,pointcloud):
+ if(type(pointcloud) == str):
+ pointcloud_np = np.load(pointcloud)
+ else:
+ pointcloud_np = pointcloud
+ world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
+ 1.60000000e+00],
+ [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
+ 0.00000000e+00],
+ [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
+ 1.45000000e+00],
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
+ 1.00000000e+00]])
+ pcd = o3d.geometry.PointCloud()
+ pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
+ pcd.transform(np.linalg.inv(world_to_camera))
+ pointcloud_np = np.asarray(pcd.points)
+
+ # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
+
+ depth_data = pointcloud_np[:,2]
+
+ fx = self.robosuite_intrinsic_matrix_[0,0]
+ fy = self.robosuite_intrinsic_matrix_[1,1]
+ cx = self.robosuite_intrinsic_matrix_[0,2]
+ cy = self.robosuite_intrinsic_matrix_[1,2]
+ u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
+ u_pixels = np.round(u_pixels).astype(int)
+ v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
+ v_pixels = np.round(v_pixels).astype(int)
+ pixels = np.vstack((u_pixels,v_pixels)).T
+ pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
+ depth_image = np.zeros(self.robosuite_image_shape_)
+ for pixel,point in pixels_to_points.items():
+ depth_image[pixel[1],pixel[0]] = point[2]
+ return depth_image
+
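+ # Core cross-painting step: thresholds the masks, isolates the gripper as
+ # the difference between the full-robot and no-gripper masks, cleans it
+ # with erosion/dilation, cuts the real robot out of the background, fills
+ # the hole with Telea inpainting, and pastes the Gazebo-rendered robot on
+ # top. Returns the composite plus a masked variant used as diffusion input.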
+ def dummyInpaintingEarly(self,rgb,seg_file,seg_file_no_gripper,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file_no_gripper = cv2.threshold(seg_file_no_gripper, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('seg_file.png',seg_file)
+ cv2.imwrite('seg_file_no_gripper.png',seg_file_no_gripper)
+ seg_file_gripper = abs(seg_file - seg_file_no_gripper)
+ seg_file_gripper = cv2.erode(seg_file_gripper,np.ones((3,3),np.uint8),iterations=3)
+ seg_file_gripper = cv2.dilate(seg_file_gripper,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('seg_file_gripper.png',seg_file_gripper)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file_no_gripper)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=15)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file_gripper
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
+
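+ # Simpler variant of dummyInpaintingEarly: composites the Gazebo robot over
+ # the masked real background, paints the leftover seam with a flat target
+ # color instead of inpainting, then publishes the image and its blend mask.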
+ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=15)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
+ cv2.imwrite('background_only.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ #cv2.imwrite('no_fill.png',inpainted_image)
+
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ # cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
+
+ target_color = (39,43,44)
+ target_mask = np.zeros_like(inpainted_image)
+ target_mask[:,:] = target_color
+ inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ # inpainted_image = cv2_inpaint_image
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('inpainting'):
+ os.makedirs('inpainting')
+ if not os.path.exists('mask'):
+ os.makedirs('mask')
+ cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
+ cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
+ mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
+ inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
+ mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.mask_image_publisher_.publish(mask_image_msg)
+
+# better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
+# better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
+# target_color = (39,43,44)
+# target_mask = np.zeros_like(inpainted_image)
+# target_mask[:,:] = target_color
+# got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+# #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
+# image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
+# image_8bit = got_away
+ def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
+ # NOTE: disabled; the body below is an older single-view inpainting path
+ # retained for reference and never executes.
+ return
+ if type(rgb) == str:
+ rgb_np = cv2.imread(rgb)
+ seg = cv2.imread(seg_file,0)
+ depth_np = np.load(depth)
+ return
+ else:
+ rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ depth_np = depth
+ seg = seg_file
+ _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
+ #rgb_np = cv2.resize(rgb_np,(128,128))
+ #depth_np = cv2.resize(depth_np,(128,128))
+ #seg = cv2.resize(seg,(128,128))
+ #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
+ real_depth_image_unmasked = depth_np
+ real_rgb_image_unmasked = rgb_np
+ real_segmentation_mask_255 = seg
+ gazebo_robot_only_rgb = gazebo_rgb
+ gazebo_segmentation_mask_255 = gazebo_seg
+ gazebo_robot_only_depth = gazebo_depth
+ real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
+ inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
+ outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
+ real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
+ real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
+
+ # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
+ real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
+ joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
+ joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
+ joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
+ joined_depth_argmin = np.argmin(joined_depth,axis=0)
+
+ real_rgb_image_masked_inpaint = real_rgb_image_masked
+
+
+ attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
+ inverted_joined_depth_argmin = 1 - joined_depth_argmin
+ gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
+ gazebo_robot_only_lab[:,:,0] += 100
+ gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ gazebo_robot_only_rgb = gazebo_robot_only_mod
+ attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
+
+ inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
+
+ better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
+ better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
+ target_color = (39,43,44)
+ target_mask = np.zeros_like(inpainted_image)
+ target_mask[:,:] = target_color
+ got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
+ image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
+ image_8bit = got_away
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('inpainting'):
+ os.makedirs('inpainting')
+ if not os.path.exists('mask'):
+ os.makedirs('mask')
+ cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
+ cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
+ # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
+
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
+ mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.mask_image_publisher_.publish(mask_image_msg)
+ ready_for_next_message = Bool()
+ ready_for_next_message.data = True
+ self.ready_for_next_input_publisher_.publish(ready_for_next_message)
+ return
+
+ transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
+ real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
+ inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
+ real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
+ real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
+ real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
+ joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
+ joined_depth[joined_depth == 0] = 1000
+ joined_depth_argmin = np.argmin(joined_depth,axis=0)
+ real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
+ attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
+ inverted_joined_depth_argmin = 1 - joined_depth_argmin
+ attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
+ inpainted_image = attempt + attempt2
+ image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
+ last_slash_index = seg_file.rfind('/')
+ underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
+ str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
+ inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
+
+ cv2.imwrite(inpaint_file,image_8bit)
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+
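+ # Brute-force mode filter: for every masked pixel, picks the most frequent
+ # RGB value in its kernel_size x kernel_size neighborhood. Pure-Python
+ # loops over the image, so only practical for small debug images.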
+ def custom_mode_filter(self,image, mask, kernel_size):
+ result = np.zeros_like(image)
+ pad = kernel_size // 2
+ for i in range(pad, image.shape[0] - pad):
+ for j in range(pad, image.shape[1] - pad):
+ if mask[i, j]:
+ neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
+ unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
+ mode_color = unique[np.argmax(counts)]
+ result[i, j] = mode_color
+
+ return result
+
+ def replace_black_with_surrounding_mode(self,img):
+
+ # Convert the image to grayscale
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+ # Threshold the grayscale image to identify black pixels
+ _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
+
+ # Invert the mask to mark black pixels as True (255) and others as False (0)
+ #mask = np.where(thresh == 0, 255, 0)
+ mask = thresh
+ # Apply custom mode filter to find the mode RGB value of the surrounding pixels
+ replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
+
+ return replaced_img
+
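+ # Reprojects the Gazebo render into the robosuite camera: projects the
+ # shared pointcloud with both sets of intrinsics, maps each robosuite pixel
+ # back to its source Gazebo pixels, averages their colors and depths, then
+ # uses morphology plus Telea inpainting to fill the sampling holes.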
+ def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
+ pointcloud = point_cloud2.read_points(pointcloud_msg)
+ # Pointcloud is in camera frame
+ pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
+
+ gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
+ _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
+ robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
+ robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
+ robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
+ transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
+ transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
+ gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) # zeros mark empty pixels; zero depths are remapped to far range downstream
+ for pixel in pixels_to_points_robosuite.keys():
+ points = pixels_to_points_robosuite[pixel]
+ points_np = np.array([list(point) for point in points])
+ depth_value = np.mean(points_np[:,2])
+ gazebo_pixels = []
+ for point in points:
+ gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
+ rgb_values = []
+ for gazebo_pixel in gazebo_pixels:
+ rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
+
+ rgb_values_np = np.array(rgb_values)
+ mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
+ transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
+ gazebo_depth[pixel[0],pixel[1]] = depth_value
+ transformed_gazebo_seg[pixel[0],pixel[1]] = 255
+ # cv2 morphology expects an ndarray structuring element, so build an
+ # explicit 3x3 kernel rather than passing a bare shape tuple
+ morph_kernel = np.ones((3,3),np.uint8)
+ gazebo_depth_after = cv2.dilate(gazebo_depth,morph_kernel,iterations=3)
+ gazebo_depth_after = cv2.erode(gazebo_depth_after,morph_kernel,iterations=3)
+ transformed_gazebo_seg_after = cv2.dilate(transformed_gazebo_seg,morph_kernel,iterations=3)
+ transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,morph_kernel,iterations=3)
+ transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
+ transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
+ #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
+ #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
+ inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
+ transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
+ return transformed_gazebo_rgb,gazebo_depth_after
+
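+ # Splats the synthetic robot pointcloud into mask/depth images, drops
+ # points that land on the gray Gazebo background ([155,155,155]), and
+ # hands the masked Gazebo render to inpainting().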
+ def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
+ if(self.is_ready_):
+ if(self.camera_intrinsic_matrix_ is None):
+ return
+ all_pixels,depth_data = self.getPixels(msg)
+ mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
+ clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
+ depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
+ clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
+ white_color = (255,255,255)
+ i = 0
+ block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
+ for coord in all_pixels:
+ x,y = coord
+ mask_image[round(x), round(y)] = 255
+ depth_image[round(x), round(y)] = depth_data[i]
+ if(block_background_mask[round(x),round(y)]):
+ clean_mask_image[round(x),round(y)] = 255
+ clean_depth_image[round(x),round(y)] = depth_data[i]
+ i += 1
+ cv2.imwrite('clean_mask_image.png',clean_mask_image)
+ cv2.imwrite('mask_image.png',mask_image)
+ cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
+ mask_image = clean_mask_image
+ depth_image = clean_depth_image
+ if(self.original_image_ is not None):
+ mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
+ gazebo_masked_image = np.zeros_like(self.original_image_)
+ gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
+ cv2.imwrite('original_image.png',self.original_image_)
+ cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
+ self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
+ # NOTE: the remainder of this callback is legacy single-camera code with
+ # hard-coded output paths and never executes.
+ return
+ np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
+ old_mask_image = mask_image
+ mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
+ _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
+ if(self.original_image_ is not None):
+ mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
+ im_floodfill = mask_image.copy()
+ (h,w) = mask_image.shape
+ mask = np.zeros((h+2,w+2),np.uint8)
+ cv2.floodFill(im_floodfill,mask,(0,0),255)
+ im_floodfill_inv = cv2.bitwise_not(im_floodfill)
+ im_out = mask_image | im_floodfill_inv
+ mask_image = im_out
+ # Create a new image with the same size as the original image
+ gazebo_masked_image = np.zeros_like(self.original_image_)
+
+ # Apply the gazebo_mask to the original image using element-wise multiplication
+ gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
+ gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
+ cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
+ cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
+ #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
+ ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
+ self.full_mask_image_publisher_.publish(ros_mask_image)
+ self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
+
+ def setupMesh(self,filename,link_name,rpy_str,xyz_str):
+ if(self.is_ready_):
+ # URDF rpy is applied in ZYX order here (eulerToR builds R = Rz @ Ry @ Rx)
+ mesh_scene = trimesh.load(filename)
+ if(link_name == "panda_link6"):
+ meshes = mesh_scene.geometry
+ for mesh in meshes:
+ if(mesh != 'Shell006_000-mesh'):
+ vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
+ triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
+ meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+ #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+
+ mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
+ for g in mesh_scene.geometry.values()))
+ # Convert Trimesh to Open3D TriangleMesh
+ vertices = o3d.utility.Vector3dVector(mesh.vertices)
+ triangles = o3d.utility.Vector3iVector(mesh.faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ test_pcd_data = np.asarray(test_pcd.points)
+
+ diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
+ # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
+
+ if(diff < 1):
+ open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
+ else:
+ R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
+ open3d_mesh.rotate(R,[0,0,0])
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
+ #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
+ rpy_str_list = rpy_str.split()
+ rpy_floats = [float(x) for x in rpy_str_list]
+ rpy_np = np.array(rpy_floats)
+ xyz_str_list = xyz_str.split()
+ xyz_floats = [float(x) for x in xyz_str_list]
+ if(link_name == "panda_link1"):
+ xyz_floats = [0, 0, -0.1929999099]
+ elif(link_name == "panda_link3"):
+ xyz_floats = [0,0,-0.1219998142]
+ elif(link_name == "panda_link5"):
+ xyz_floats = [0,0,-0.2630000007]
+ elif(link_name == "panda_link7"):
+ rpy_np = np.array([0,0,-math.pi/4])
+ xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
+ xyz_np = 1000 * np.array(xyz_floats)
+ R2 = self.eulerToR(rpy_np)
+ # Create a 4x4 identity matrix
+ transform_matrix = np.eye(4)
+
+ # Replace the top-left 3x3 submatrix with the rotation matrix
+ transform_matrix[:3, :3] = R2
+
+ # Set the first three elements in the fourth column to the translation vector
+ transform_matrix[:3, 3] = xyz_np
+
+ # Set the bottom-right element to 1
+ transform_matrix[3, 3] = 1.0
+ open3d_mesh.transform(transform_matrix)
+
+ transform = self.fks_[link_name]
+
+ position = Vector3()
+ quaternion = Quaternion()
+ position.x = transform.pos[0]
+ position.y = transform.pos[1]
+ position.z = transform.pos[2]
+
+ quaternion.w = transform.rot[0]
+ quaternion.x = transform.rot[1]
+ quaternion.y = transform.rot[2]
+ quaternion.z = transform.rot[3]
+
+ robot_fk = self.transformStampedToMatrix(quaternion,position)
+ t_matrix = self.camera_to_world_ @ robot_fk
+ open3d_mesh.transform(t_matrix)
+ return open3d_mesh
+
+ # Dead code below the return: a TF-based fallback that requires the camera
+ # to be present in the Gazebo scene; retained for reference.
+ while True:
+ try:
+ t = self.tf_buffer_.lookup_transform(
+ "camera_color_optical_frame",
+ link_name,
+ rclpy.time.Time(),
+ )
+ t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
+ open3d_mesh.transform(t_matrix)
+ return open3d_mesh
+ except TransformException as ex:
+ #TODO Change This
+ pass
+
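+ # Builds a 4x4 homogeneous transform from a quaternion and translation,
+ # converting the translation from meters to millimeters to match the mesh
+ # units used above.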
+ def transformStampedToMatrix(self,rotation,translation):
+ if(self.is_ready_):
+ q0 = rotation.w
+ q1 = rotation.x
+ q2 = rotation.y
+ q3 = rotation.z
+ # First row of the rotation matrix
+ r00 = 2 * (q0 * q0 + q1 * q1) - 1
+ r01 = 2 * (q1 * q2 - q0 * q3)
+ r02 = 2 * (q1 * q3 + q0 * q2)
+
+ # Second row of the rotation matrix
+ r10 = 2 * (q1 * q2 + q0 * q3)
+ r11 = 2 * (q0 * q0 + q2 * q2) - 1
+ r12 = 2 * (q2 * q3 - q0 * q1)
+
+ # Third row of the rotation matrix
+ r20 = 2 * (q1 * q3 - q0 * q2)
+ r21 = 2 * (q2 * q3 + q0 * q1)
+ r22 = 2 * (q0 * q0 + q3 * q3) - 1
+ t_matrix = np.array(
+ [[r00, r01, r02,translation.x * 1000],
+ [r10, r11, r12,translation.y * 1000],
+ [r20, r21, r22,translation.z * 1000],
+ [0,0,0,1]]
+ )
+ return t_matrix
+
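+ # Aggregates the per-link meshes at the current FK pose into one cloud,
+ # converts it back to meters, publishes it as a PointCloud2, and feeds it
+ # to the full mask/inpainting pipeline.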
+ def setupMeshes(self,rgb,depth,segmentation):
+ if(self.is_ready_):
+ open3d_mesh = None
+ for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
+ if open3d_mesh is None:
+ open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
+ else:
+ open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
+ pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
+ pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
+ pcd_data = np.asarray(pcd.points)
+ np.save('panda_gazebo_pointcloud.npy',pcd_data)
+ point_cloud_msg = PointCloud2()
+ point_cloud_msg.header = Header()
+ point_cloud_msg.header.frame_id = "real_camera_link"
+ fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
+ PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
+ PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
+ ]
+ point_cloud_msg.height = 1
+ point_cloud_msg.width = len(pcd_data)
+ point_cloud_msg.fields = fields
+ point_cloud_msg.is_bigendian = False
+ point_cloud_msg.point_step = 3 * 4
+ point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
+ point_cloud_msg.is_dense = True
+ point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
+ self.full_publisher_.publish(point_cloud_msg)
+ self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
+
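+ # Roll-pitch-yaw to rotation matrix as R = Rz @ Ry @ Rx (extrinsic XYZ,
+ # equivalently intrinsic ZYX).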
+ def eulerToR(self,rpy_np):
+ if(self.is_ready_):
+ rotation_x = rpy_np[0]
+ rotation_y = rpy_np[1]
+ rotation_z = rpy_np[2]
+ Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
+ Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
+ Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
+ R = np.matmul(Rz,Ry)
+ R = np.matmul(R,Rx)
+ return R
+
+ def replace_nan_with_neighbors_average(self,array):
+ def replace_function(subarray):
+ center = subarray[4]
+ # NaN compares unequal to everything, including itself, so it must be
+ # detected with np.isnan rather than an equality test
+ if not np.isnan(center):
+ return center
+ valid_values = subarray[~np.isnan(subarray)]
+ if valid_values.size == 0:
+ return np.nan
+ else:
+ return np.nanmean(valid_values)
+
+ return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
+
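+ # Offline entry point: loads RGB/depth/segmentation/joints from the file
+ # paths in the message, repacks them into an InputFilesRobosuiteData, and
+ # forwards to listenerCallbackOnlineDebug. The code after the early return
+ # is an older UR5e-to-Panda retargeting path kept for reference.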
+ def listenerCallback(self,msg):
+ if self.is_ready_:
+ self.offline_ = True
+ start_time = time.time()
+ # path_array = msg.rgb.split('/')
+ # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
+ # if not os.path.exists(online_input_folder):
+ # os.makedirs(online_input_folder)
+ # online_input_num_folder = online_input_folder + '/' + path_array[-2]
+ # if not os.path.exists(online_input_num_folder):
+ # os.makedirs(online_input_num_folder)
+ # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ rgb_image = cv2.imread(msg.rgb)
+ # cv2.imwrite(input_rgb_path,rgb_image)
+ rgb_array = rgb_image.flatten().tolist()
+ depth_image = np.load(msg.depth)
+ # path_array = msg.depth.split('/')
+ # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # np.save(input_depth_path,depth_image)
+ depth_array = depth_image.flatten().tolist()
+ self.seg_file_ = msg.segmentation
+ segmentation_image = cv2.imread(msg.segmentation)
+ # path_array = msg.segmentation.split('/')
+ # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # cv2.imwrite(input_segmentation_path,segmentation_image)
+ segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
+ joint_array = np.load(msg.joints).tolist()
+ # path_array = msg.joints.split('/')
+ # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # np.save(input_joints_path,np.array(joint_array))
+ input_file_robosuite_data_msg = InputFilesRobosuiteData()
+ input_file_robosuite_data_msg.rgb = rgb_array
+ input_file_robosuite_data_msg.depth_map = depth_array
+ input_file_robosuite_data_msg.segmentation = segmentation_ros_image
+ input_file_robosuite_data_msg.joints = joint_array
+ self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
+ return
+ rgb = msg.rgb
+ depth = msg.depth
+ segmentation = msg.segmentation
+ joints = msg.joints
+ joint_array = np.load(joints)
+ gripper = joint_array[-1]
+ joint_array = joint_array[:-1]
+
+ ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+ self.q_init_ = qout
+ # Hardcoded gripper
+ qout_list = qout.tolist()
+ panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
+ qout_list.append(panda_gripper_command)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.joint_commands_callback(qout_msg)
+ self.setupMeshes(rgb,depth,segmentation)
+ end_time = time.time()
+
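+ # Online entry point: sanitizes each view's depth map (clamping +/-inf,
+ # zeroing NaNs), computes the Panda end-effector pose via FK, retargets it
+ # to the Panda-with-UR5-gripper model via IK (relaxing the solver bounds
+ # tenfold until a solution appears), maps the gripper command linearly,
+ # and publishes the combined joint vector for Gazebo to render.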
+ def listenerCallbackOnlineDebug(self,msg):
+ if(len(msg.data_pieces) == 2):
+ start_time = time.time()
+ self.i_ += 1
+ online_input_folder = 'offline_ur5e_input'
+ if not os.path.exists(online_input_folder):
+ os.makedirs(online_input_folder)
+ online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
+ if not os.path.exists(online_input_num_folder):
+ os.makedirs(online_input_num_folder)
+ left_msg = msg.data_pieces[0]
+ right_msg = msg.data_pieces[1]
+ elif(len(msg.data_pieces) == 1):
+ left_msg = msg.data_pieces[0]
+
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
+ left_depth_np[np.isnan(left_depth_np)] = 0
+ if(left_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
+
+ # NOTE: assumes two data_pieces (left and right); with a single view,
+ # right_msg is unbound past this point
+ right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
+ right_depth_np[np.isnan(right_depth_np)] = 0
+ if(right_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
+
+ panda_panda_gripper_joints = np.array(msg.data_pieces[0].joints)
+ real_ee_gripper_joint = panda_panda_gripper_joints[-1]
+
+ panda_panda_gripper_joints = panda_panda_gripper_joints[:-1]
+ ee_pose = self.panda_panda_gripper_solver_.fk(panda_panda_gripper_joints)
+ end_effector_rotation_with_no_translation = np.array([[0,1,0,0],
+ [-1,0,0,0],
+ [0,0,1,0],
+ [0,0,0,1]])
+ ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ #end_effector_rotation_with_no_translation = np.linalg.inv(np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]]))
+ #ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ #ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ if(self.first_time_):
+ self.q_init_ = panda_panda_gripper_joints
+ self.first_time_ = False
+ qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+ b_xyz = 1e-5
+ b_rpy = 1e-3
+ while(qout is None):
+ b_xyz *= 10
+ b_rpy *= 10
+ qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=panda_panda_gripper_joints,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+ # b_xyz grows by repeated float multiplication, so test with >= instead
+ # of exact equality
+ if(b_xyz >= 0.1):
+ print("Couldn't find good IK")
+ qout = panda_panda_gripper_joints
+ print("Bound xyz: " + str(b_xyz))
+ self.q_init_ = qout
+ panda_arm_joints = panda_panda_gripper_joints.tolist()
+ panda_gripper_joint = -0.04*real_ee_gripper_joint + 0.04
+ panda_gripper_joints = [panda_gripper_joint] * 2
+ panda_ur5_arm_joints = qout.tolist()
+ panda_ur5_gripper_joint = -20 * panda_gripper_joint + 0.8
+ panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
+ full_joint_list = []
+
+ full_joint_list.extend(panda_arm_joints)
+ full_joint_list.extend(panda_ur5_arm_joints)
+ full_joint_list.extend(panda_gripper_joints)
+ full_joint_list.extend(panda_ur5_gripper_joints)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = full_joint_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.left_real_rgb_ = left_msg.rgb
+ self.right_real_rgb_ = right_msg.rgb
+
+ self.left_real_depth_ = left_depth_np
+ self.right_real_depth_ = right_depth_np
+ self.real_qout_list_ = full_joint_list
+ end_time = time.time()
+ print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
+ # else:
+ # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
+ # print("WARNING HIT IK ON FRANKA ERROR")
+
+
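+ # Variant of listenerCallback that receives raw arrays instead of file
+ # paths: reshapes the flattened RGB/depth buffers to the segmentation
+ # image's dimensions before running the same UR5e-to-Panda retargeting.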
+ def listenerCallbackData(self,msg):
+ if self.is_ready_:
+ start_time = time.time()
+ segmentation_data = msg.segmentation
+
+ segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
+
+ rgb = msg.rgb
+ rgb = np.array(rgb) \
+ .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
+
+
+
+ depth_map = msg.depth_map
+ depth_map = np.array(depth_map) \
+ .reshape(segmentation_data.shape)
+
+
+ joint_array = msg.joints
+ gripper = joint_array[-1]
+ joint_array = joint_array[:-1]
+
+ ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+ self.q_init_ = qout
+
+ # Hardcoded gripper
+ qout_list = qout.tolist()
+ panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
+ qout_list.append(panda_gripper_command)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.joint_commands_callback(qout_msg)
+ self.setupMeshes(rgb, depth_map, segmentation_data)
+ # end_time = time.time()
+ # print("Total time: " + str(end_time - start_time) + " s")
+
+
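+ # Mirrors the commanded joints onto /joint_states (duplicating the last
+ # value for the second finger joint), recomputes forward kinematics for
+ # every link, and caches the results in self.fks_ for setupMesh.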
+ def joint_commands_callback(self, msg):
+ self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
+ #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
+ self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
+ msg.data.append(msg.data[-1])
+ #msg.data.append(gripper_val)
+ #msg.data.append(gripper_val)
+ # for i in range(5):
+ # if(i == 1 or i == 4):
+ # msg.data.append(gripper_val)
+ # else:
+ # msg.data.append(gripper_val)
+ self.joint_state_msg.position = msg.data
+ self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
+ self.fks_ = self.chain_.forward_kinematics(self.thetas_)
+ self.panda_joint_state_publisher_.publish(self.joint_state_msg)
+
+
+
+
+def main(args=None):
+ rclpy.init(args=args)
+
+ write_data = WriteData()
+
+ rclpy.spin(write_data)
+
+ # Destroy the node explicitly
+ # (optional - otherwise it will be done automatically
+ # when the garbage collector destroys the node object)
+ write_data.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_wrist_early_inpaint.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_wrist_early_inpaint.py
new file mode 100644
index 0000000..c391781
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_multi_real_better_wrist_early_inpaint.py
@@ -0,0 +1,1342 @@
+#!/usr/bin/env python3
+
+import rclpy
+from rclpy.node import Node
+import trimesh
+from std_msgs.msg import Header, Float64MultiArray,Bool
+from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
+import numpy as np
+from launch_ros.substitutions import FindPackageShare
+from launch.substitutions import PathJoinSubstitution
+import os
+import glob
+import subprocess
+import xml.etree.ElementTree as ET
+from functools import partial
+import math
+from message_filters import TimeSynchronizer, Subscriber
+from tf2_ros import TransformException
+from tf2_ros.buffer import Buffer
+from tf2_ros.transform_listener import TransformListener
+from sensor_msgs_py import point_cloud2
+import cv2
+from cv_bridge import CvBridge
+import time
+from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
+from sensor_msgs.msg import JointState
+from tracikpy import TracIKSolver
+from mdh.kinematic_chain import KinematicChain
+from mdh import UnReachable
+from geometry_msgs.msg import Vector3, Quaternion
+from scipy.spatial.transform import Rotation as R
+from message_filters import ApproximateTimeSynchronizer, Subscriber
+import pathlib
+
+
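+# ROS 2 node for the Panda to Panda-with-UR5-gripper variant of the
+# cross-painting pipeline: subscribes to paired left/right real and Gazebo
+# camera streams (plus no-gripper renders), retargets joints via IK, and
+# publishes inpainted images as MultipleInpaintImages.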
+class WriteData(Node):
+ def __init__(self):
+ super().__init__('write_data_node')
+ self.is_ready_ = False
+ self.thetas_ = None
+ self.debug_ = True
+ self.float_image_ = False
+ file_directory = pathlib.Path(__file__).parent.resolve()
+ self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
+ self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
+ self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
+ self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
+
+ # real_camera_link to world and then multiply translation by 1000
+ # self.camera_to_world_ = np.array([[0,1,0,0],
+ # [0.258,0,-0.966,989],
+ # [-0.966,0,-0.258,1919],
+ # [0,0,0,1]])
+ self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
+ [0.706, 0.000 ,-0.708, 603],
+ [-0.708, 0.000, -0.706 , 1307],
+ [0,0,0,1]])
+
+ self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
+ [ 0. , 101.39696962, 42. ],
+ [ 0. , 0. , 1. ]])
+ self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
+ self.panda_joint_state_publisher_ = self.create_publisher(
+ JointState,
+ '/joint_states', # Topic name for /joint_states
+ 1)
+ self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
+ self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
+ self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
+ self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
+ self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
+ self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
+ self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
+ self.subscription_ = self.create_subscription(
+ InputFilesRobosuite,
+ '/input_files',
+ self.listenerCallback,
+ 1)
+ self.subscription_data_ = self.create_subscription(
+ InputFilesRealDataMulti,
+ 'input_files_data_real',
+ self.listenerCallbackOnlineDebug,
+ 1)
+ self.joint_state_msg = JointState()
+
+ # Hardcoded start position
+ self.q_init_ = np.array([-0.5489533543586731, 0.07463289797306061, 0.5471758842468262, -2.2465689182281494, -0.12455657869577408, 2.2839372158050537, 0.7166903018951416])
+
+ self.publishers_ = []
+ self.subscribers_ = []
+ self.timers_ = []
+ self.left_real_rgb_ = None
+ self.right_real_rgb_ = None
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
+ self.real_seg_ = None
+ self.real_qout_list_ = None
+ self.i_ = -1
+ self.offline_ = False
+ self.seg_file_ = None
+ self.tf_buffer_ = Buffer()
+ self.tf_listener_ = TransformListener(self.tf_buffer_, self)
+ self.camera_intrinsic_matrix_ = None
+ self.image_shape_ = None
+ # NOTE: hard-coded to the robosuite camera resolution; update if that changes
+ self.robosuite_image_shape_ = (84,84)
+ self.camera_intrinsic_subscription_ = self.create_subscription(
+ CameraInfo,
+ '/camera/camera_info',
+ self.cameraInfoCallback,
+ 1
+ )
+ self.camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/camera/image_raw',
+ self.cameraCallback,
+ 1
+ )
+
+ self.ur5_left_rgb_ = None
+ self.ur5_left_depth_ = None
+ self.panda_left_rgb_ = None
+ self.panda_left_depth_ = None
+ self.ur5_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/image_raw',
+ self.ur5LeftRgbCallback,
+ 1
+ )
+
+ self.ur5_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/depth/image_raw',
+ self.ur5LeftDepthCallback,
+ 1
+ )
+
+ self.panda_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_left_camera/image_raw',
+ self.pandaLeftRgbCallback,
+ 1
+ )
+
+ self.panda_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_left_camera/depth/image_raw',
+ self.pandaLeftDepthCallback,
+ 1
+ )
+ self.first_time_ = True
+ self.ur5_right_rgb_ = None
+ self.ur5_right_depth_ = None
+ self.panda_right_rgb_ = None
+ self.panda_right_depth_ = None
+ self.ur5_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/image_raw',
+ self.ur5RightRgbCallback,
+ 1
+ )
+
+ self.ur5_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/depth/image_raw',
+ self.ur5RightDepthCallback,
+ 1
+ )
+
+ self.panda_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/image_raw',
+ self.pandaRightRgbCallback,
+ 1
+ )
+
+ self.panda_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/depth/image_raw',
+ self.pandaRightDepthCallback,
+ 1
+ )
+
+ self.panda_left_no_gripper_rgb_ = None
+ self.panda_left_no_gripper_depth_ = None
+ self.panda_right_no_gripper_rgb_ = None
+ self.panda_right_no_gripper_depth_ = None
+ self.panda_no_gripper_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/image_raw',
+ self.pandaLeftNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/depth/image_raw',
+ self.pandaLeftNoGripperDepthCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/image_raw',
+ self.pandaRightNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/depth/image_raw',
+ self.pandaRightNoGripperDepthCallback,
+ 1
+ )
+
+ self.joint_subscription_ = self.create_subscription(
+ JointState,
+ '/gazebo_joint_states',
+ self.noTimeGazeboCallback,
+ 1
+ )
+ self.cv_bridge_ = CvBridge()
+ self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
+ self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
+ timer_period = 0.5
+ self.links_info_ = []
+ self.original_meshes_ = []
+ self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
+ self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
+ #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
+ self.updated_joints_ = False
+ self.is_ready_ = True
+
+ def ur5LeftRgbCallback(self,msg):
+ self.ur5_left_rgb_ = msg
+
+ def ur5LeftDepthCallback(self,msg):
+ self.ur5_left_depth_ = msg
+
+ def pandaLeftRgbCallback(self,msg):
+ self.panda_left_rgb_ = msg
+
+ def pandaLeftDepthCallback(self,msg):
+ self.panda_left_depth_ = msg
+
+ def ur5RightRgbCallback(self,msg):
+ self.ur5_right_rgb_ = msg
+
+ def ur5RightDepthCallback(self,msg):
+ self.ur5_right_depth_ = msg
+
+ def pandaRightRgbCallback(self,msg):
+ self.panda_right_rgb_ = msg
+
+ def pandaRightDepthCallback(self,msg):
+ self.panda_right_depth_ = msg
+
+ def pandaLeftNoGripperRgbCallback(self,msg):
+ self.panda_left_no_gripper_rgb_ = msg
+
+ def pandaLeftNoGripperDepthCallback(self,msg):
+ self.panda_left_no_gripper_depth_ = msg
+
+ def pandaRightNoGripperRgbCallback(self,msg):
+ self.panda_right_no_gripper_rgb_ = msg
+
+ def pandaRightNoGripperDepthCallback(self,msg):
+ self.panda_right_no_gripper_depth_ = msg
+
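+ # Runs the full inpainting for the left (wrist) and right cameras on every
+ # Gazebo joint-state tick, flips the left view vertically, and publishes
+ # both inpainted images and both masks in one MultipleInpaintImages message.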
+ def noTimeGazeboCallback(self,joint_msg):
+ start_time = time.time()
+ left_inpainted_image,left_mask = self.doFullInpainting(self.panda_left_rgb_,self.panda_left_depth_,self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_no_gripper_depth_,self.left_real_rgb_,self.left_real_depth_,is_wrist=True)
+ right_inpainted_image,right_mask = self.doFullInpainting(self.panda_right_rgb_,self.panda_right_depth_,self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_no_gripper_depth_,self.right_real_rgb_,self.right_real_depth_,is_wrist=False)
+ left_inpainted_image = cv2.flip(left_inpainted_image,0)
+ left_mask = cv2.flip(left_mask,0)
+
+ inpainted_image_msg = MultipleInpaintImages()
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(left_mask),self.cv_bridge_.cv2_to_imgmsg(right_mask)]
+ mask_image_msg.images = []
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('left_inpainting'):
+ os.makedirs('left_inpainting')
+ if not os.path.exists('right_inpainting'):
+ os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
+ cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
+ cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
+
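+    # Builds binary robot masks by thresholding the rendered depth maps (anything
+    # nearer than 8, i.e. meters in the Gazebo render, is treated as robot) and
+    # delegates the actual composite to dummyInpaintingEarly.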
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,gazebo_no_gripper_depth,world_rgb,world_depth,is_wrist=True):
+ real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
+ real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
+ gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
+
+ gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
+ cv2.imwrite('real_rgb.png',real_rgb_np)
+ cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
+ cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
+ cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+ gazebo_no_gripper_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_no_gripper_depth)
+
+ real_seg_np = (real_depth_np < 8).astype(np.uint8)
+ real_seg_255_np = 255 * real_seg_np
+ gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
+ gazebo_seg_255_np = 255 * gazebo_seg_np
+ cv2.imwrite('real_seg.png',real_seg_255_np)
+ cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
+ real_no_gripper_seg_255_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
+ real_no_gripper_seg_255_np = 255 * real_no_gripper_seg_255_np
+ cv2.imwrite('gazebo_seg_no_gripper.png',real_no_gripper_seg_255_np)
+ real_rgb = world_rgb
+ real_depth = world_depth
+ real_seg = real_seg_255_np
+ gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
+ return self.dummyInpaintingEarly(real_rgb,real_seg,real_no_gripper_seg_255_np,gazebo_rgb_np,gazebo_seg_255_np,is_wrist)
+
+    def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
+        return  # NOTE: disabled; the joint subscriber drives noTimeGazeboCallback instead
+ start_time = time.time()
+ real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
+ real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
+ gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
+
+ gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
+ cv2.imwrite('real_rgb.png',real_rgb_np)
+ cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
+ cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
+ cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+
+ real_seg_np = (real_depth_np < 8).astype(np.uint8)
+ real_seg_255_np = 255 * real_seg_np
+ gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
+ gazebo_seg_255_np = 255 * gazebo_seg_np
+ cv2.imwrite('real_seg.png',real_seg_255_np)
+ cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
+ real_rgb = self.real_rgb_
+ real_depth = self.real_depth_
+ real_seg = real_seg_255_np
+ gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
+ self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
+ self.updated_joints_ = False
+ end_time = time.time()
+ print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
+ return
+ #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
+
+ #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
+ # self.updated_joints_ = True
+
+ def cameraInfoCallback(self,msg):
+ if(self.is_ready_):
+ self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
+ self.image_shape_ = (msg.height,msg.width,3)
+
+ def cameraCallback(self,msg):
+ if(self.is_ready_):
+ self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
+ #cv2.imwrite('gazebo_rgb.png',self.original_image_)
+
+ def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
+        # URDF rpy is the fixed-axis XYZ convention, applied as R = Rz @ Ry @ Rx (see eulerToR)
+ mesh_scene = trimesh.load(filename)
+ if(link_name == "panda_link6"):
+ meshes = mesh_scene.geometry
+ for mesh in meshes:
+ if(mesh != 'Shell006_000-mesh'):
+ vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
+ triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
+ meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+ #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+ mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
+ for g in meshes.values()))
+
+ mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
+ for g in mesh_scene.geometry.values()))
+ # Convert Trimesh to Open3D TriangleMesh
+ vertices = o3d.utility.Vector3dVector(mesh.vertices)
+ triangles = o3d.utility.Vector3iVector(mesh.faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ test_pcd_data = np.asarray(test_pcd.points)
+
+ diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
+        # Unit heuristic: an extent under 1 means the mesh is in meters, so scale it to mm (everything is converted back to meters later)
+
+ if(diff < 1):
+ open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
+ else:
+ R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
+ open3d_mesh.rotate(R,[0,0,0])
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
+ #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
+ rpy_str_list = rpy_str.split()
+ rpy_floats = [float(x) for x in rpy_str_list]
+ rpy_np = np.array(rpy_floats)
+ xyz_str_list = xyz_str.split()
+ xyz_floats = [float(x) for x in xyz_str_list]
+ if(link_name == "panda_link1"):
+ xyz_floats = [0, 0, -0.1929999099]
+ elif(link_name == "panda_link3"):
+ xyz_floats = [0,0,-0.1219998142]
+ elif(link_name == "panda_link5"):
+ xyz_floats = [0,0,-0.2630000007]
+ elif(link_name == "panda_link7"):
+ rpy_np = np.array([0,0,-math.pi/4])
+ xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
+ xyz_np = 1000 * np.array(xyz_floats)
+ R2 = self.eulerToR(rpy_np)
+ # Create a 4x4 identity matrix
+ transform_matrix = np.eye(4)
+
+ # Replace the top-left 3x3 submatrix with the rotation matrix
+ transform_matrix[:3, :3] = R2
+
+ # Set the first three elements in the fourth column to the translation vector
+ transform_matrix[:3, 3] = xyz_np
+
+ # Set the bottom-right element to 1
+ transform_matrix[3, 3] = 1.0
+ open3d_mesh.transform(transform_matrix)
+ return open3d_mesh
+
+ def getPixels(self,msg):
+ if(self.is_ready_):
+ msg_data = point_cloud2.read_points(msg)
+ msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
+ depth_data = msg_data[:,2]
+ camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
+            pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[0],self.image_shape_[1])  # (height, width) order, matching the callee's signature
+ return pixel_data, depth_data
+
+ def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
+ """
+ Helper function to project a batch of points in the world frame
+ into camera pixels using the world to camera transformation.
+
+ Args:
+ points (np.array): 3D points in world frame to project onto camera pixel locations. Should
+ be shape [..., 3].
+            world_to_camera_transform (np.array): 4x4 matrix mapping world-frame
+                coordinates to pixel coordinates.
+ camera_height (int): height of the camera image
+ camera_width (int): width of the camera image
+
+ Return:
+ pixels (np.array): projected pixel indices of shape [..., 2]
+ """
+ assert points.shape[-1] == 3 # last dimension must be 3D
+ assert len(world_to_camera_transform.shape) == 2
+ assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
+
+        # convert points to homogeneous coordinates -> (px, py, pz, 1)
+ ones_pad = np.ones(points.shape[:-1] + (1,))
+ points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
+
+ # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
+ mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
+ cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
+ pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
+
+        # re-scaling from homogeneous coordinates to recover pixel values
+ # (x, y, z) -> (x / z, y / z)
+ pixels = pixels / pixels[..., 2:3]
+ pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
+ # swap first and second coordinates to get pixel indices that correspond to (height, width)
+ # and also clip pixels that are out of range of the camera image
+ pixels = np.concatenate(
+ (
+ pixels[..., 1:2].clip(0, camera_height - 1),
+ pixels[..., 0:1].clip(0, camera_width - 1),
+ ),
+ axis=-1,
+ )
+ if(pixel_to_point_dicts):
+ points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
+ pixels_to_points_robosuite = {}
+ for key,value in points_to_pixels_robosuite.items():
+ pixels_to_points_robosuite.setdefault(value,[]).append(key)
+ return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
+ else:
+ return pixels
+
+ def normalize_depth_image(self,depth_image):
+ # Define the minimum and maximum depth values in your depth image
+ min_depth = np.min(depth_image)
+ max_depth = np.max(depth_image)
+
+ # Normalize the depth image to the range [0, 1]
+ normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
+
+ # Optionally, you can scale the normalized image to a different range
+ # For example, to scale it to [0, 255] for visualization:
+ normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
+ return normalized_depth_image
+
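+    # Rasterizes a point cloud into a depth image with the standard pinhole model:
+    # u = fx*X/Z + cx, v = fy*Y/Z + cy, keeping one depth sample per pixel.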
+ def convertPointcloudToDepth(self,pointcloud):
+ if(type(pointcloud) == str):
+ pointcloud_np = np.load(pointcloud)
+ else:
+ pointcloud_np = pointcloud
+ world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
+ 1.60000000e+00],
+ [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
+ 0.00000000e+00],
+ [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
+ 1.45000000e+00],
+ [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
+ 1.00000000e+00]])
+ pcd = o3d.geometry.PointCloud()
+ pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
+ pcd.transform(np.linalg.inv(world_to_camera))
+ pointcloud_np = np.asarray(pcd.points)
+
+ # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
+
+ depth_data = pointcloud_np[:,2]
+
+ fx = self.robosuite_intrinsic_matrix_[0,0]
+ fy = self.robosuite_intrinsic_matrix_[1,1]
+ cx = self.robosuite_intrinsic_matrix_[0,2]
+ cy = self.robosuite_intrinsic_matrix_[1,2]
+ u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
+ u_pixels = np.round(u_pixels).astype(int)
+ v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
+ v_pixels = np.round(v_pixels).astype(int)
+ pixels = np.vstack((u_pixels,v_pixels)).T
+ pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
+ depth_image = np.zeros(self.robosuite_image_shape_)
+ for pixel,point in pixels_to_points.items():
+ depth_image[pixel[1],pixel[0]] = point[2]
+ return depth_image
+
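+    # Core cross-painting composite: threshold and erode the robot masks, cut the
+    # source robot out of the real image, hole-fill the background with
+    # cv2.inpaint, then paste the Gazebo render of the target robot on top.
+    # Returns (composited image, diffusion input with the seam region blacked out).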
+ def dummyInpaintingEarly(self,rgb,seg_file,seg_file_no_gripper,gazebo_rgb,gazebo_seg,is_wrist=True):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+
+ if is_wrist:
+ cv2.imwrite('seg_file.png',seg_file)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=15)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+            # (wrist branch: nothing to add back, unlike the gripper re-addition in the else branch)
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ else:
+ _, seg_file_no_gripper = cv2.threshold(seg_file_no_gripper, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('seg_file.png',seg_file)
+ cv2.imwrite('seg_file_no_gripper.png',seg_file_no_gripper)
+ seg_file_gripper = abs(seg_file - seg_file_no_gripper)
+ seg_file_gripper = cv2.erode(seg_file_gripper,np.ones((3,3),np.uint8),iterations=3)
+ seg_file_gripper = cv2.dilate(seg_file_gripper,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('seg_file_gripper.png',seg_file_gripper)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file_no_gripper)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=30)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file_gripper
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+
+ if(is_wrist):
+ pixel_shift = 50
+ chopped = gazebo_only[pixel_shift:,:]
+ black_rows = np.zeros((pixel_shift, gazebo_only.shape[1], 3), dtype=np.uint8)
+ gazebo_only = np.vstack((chopped,black_rows))
+
+ chopped_mask = gazebo_segmentation_mask_255[pixel_shift:,:]
+ black_rows_mask = np.zeros((pixel_shift, gazebo_segmentation_mask_255.shape[1]), dtype=np.uint8)
+ gazebo_segmentation_mask_255 = np.vstack((chopped_mask,black_rows_mask))
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ if(not is_wrist):
+ #Slight calibration error
+ inverted_segmentation_mask_255 = cv2.bitwise_not(np.roll(cv2.bitwise_not(inverted_segmentation_mask_255),20,axis=1))
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
+
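+    # Single-view variant of the composite above: same masking logic, but the seam
+    # region is filled with a flat target color and the result is published
+    # directly rather than returned.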
+ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=15)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
+ cv2.imwrite('background_only.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ #cv2.imwrite('no_fill.png',inpainted_image)
+
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ # cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
+
+ target_color = (39,43,44)
+ target_mask = np.zeros_like(inpainted_image)
+ target_mask[:,:] = target_color
+ inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ # inpainted_image = cv2_inpaint_image
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('inpainting'):
+ os.makedirs('inpainting')
+ if not os.path.exists('mask'):
+ os.makedirs('mask')
+ cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
+ cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
+ mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
+ inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
+ mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.mask_image_publisher_.publish(mask_image_msg)
+
+ def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
+        return  # NOTE: disabled; the active path composites via dummyInpaintingEarly and publishes in noTimeGazeboCallback
+ if type(rgb) == str:
+ rgb_np = cv2.imread(rgb)
+ seg = cv2.imread(seg_file,0)
+ depth_np = np.load(depth)
+ return
+ else:
+ rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ depth_np = depth
+ seg = seg_file
+ _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
+ #rgb_np = cv2.resize(rgb_np,(128,128))
+ #depth_np = cv2.resize(depth_np,(128,128))
+ #seg = cv2.resize(seg,(128,128))
+ #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
+ real_depth_image_unmasked = depth_np
+ real_rgb_image_unmasked = rgb_np
+ real_segmentation_mask_255 = seg
+ gazebo_robot_only_rgb = gazebo_rgb
+ gazebo_segmentation_mask_255 = gazebo_seg
+ gazebo_robot_only_depth = gazebo_depth
+ real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
+ inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
+ outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
+ real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
+ real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
+
+ # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
+ real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
+ joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
+ joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
+ joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
+ joined_depth_argmin = np.argmin(joined_depth,axis=0)
+
+ real_rgb_image_masked_inpaint = real_rgb_image_masked
+
+
+ attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
+ inverted_joined_depth_argmin = 1 - joined_depth_argmin
+ gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
+ gazebo_robot_only_lab[:,:,0] += 100
+ gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ gazebo_robot_only_rgb = gazebo_robot_only_mod
+ attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
+
+ inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
+
+ better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
+ better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
+ target_color = (39,43,44)
+ target_mask = np.zeros_like(inpainted_image)
+ target_mask[:,:] = target_color
+ got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
+ image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
+ image_8bit = got_away
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('inpainting'):
+ os.makedirs('inpainting')
+ if not os.path.exists('mask'):
+ os.makedirs('mask')
+ cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
+ cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
+ # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
+
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
+ mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.mask_image_publisher_.publish(mask_image_msg)
+ ready_for_next_message = Bool()
+ ready_for_next_message.data = True
+ self.ready_for_next_input_publisher_.publish(ready_for_next_message)
+ return
+
+ transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
+ real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
+ inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
+ real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
+ real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
+ real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
+ joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
+ joined_depth[joined_depth == 0] = 1000
+ joined_depth_argmin = np.argmin(joined_depth,axis=0)
+ real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
+ attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
+ inverted_joined_depth_argmin = 1 - joined_depth_argmin
+ attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
+ inpainted_image = attempt + attempt2
+ image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
+ last_slash_index = seg_file.rfind('/')
+ underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
+ str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
+ inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
+
+ cv2.imwrite(inpaint_file,image_8bit)
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+
+ def custom_mode_filter(self,image, mask, kernel_size):
+ result = np.zeros_like(image)
+ pad = kernel_size // 2
+ for i in range(pad, image.shape[0] - pad):
+ for j in range(pad, image.shape[1] - pad):
+ if mask[i, j]:
+ neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
+ unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
+ mode_color = unique[np.argmax(counts)]
+ result[i, j] = mode_color
+
+ return result
+
+ def replace_black_with_surrounding_mode(self,img):
+
+ # Convert the image to grayscale
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+ # Threshold the grayscale image to identify black pixels
+ _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
+
+ # Invert the mask to mark black pixels as True (255) and others as False (0)
+ #mask = np.where(thresh == 0, 255, 0)
+ mask = thresh
+ # Apply custom mode filter to find the mode RGB value of the surrounding pixels
+ replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
+
+ return replaced_img
+
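+    # Reprojects the Gazebo render into the robosuite camera: each 3D point is
+    # projected into both cameras, colors/depths are averaged per target pixel,
+    # and the remaining holes are closed with dilate/erode plus cv2.inpaint.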
+ def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
+ pointcloud = point_cloud2.read_points(pointcloud_msg)
+ # Pointcloud is in camera frame
+ pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
+
+ gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
+ _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
+ robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
+ robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
+ robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
+ transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
+ transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
+        gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))  # 0 marks pixels without a depth sample (the old `* 1000` was a no-op on zeros)
+ for pixel in pixels_to_points_robosuite.keys():
+ points = pixels_to_points_robosuite[pixel]
+ points_np = np.array([list(point) for point in points])
+ depth_value = np.mean(points_np[:,2])
+ gazebo_pixels = []
+ for point in points:
+ gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
+ rgb_values = []
+ for gazebo_pixel in gazebo_pixels:
+ rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
+
+ rgb_values_np = np.array(rgb_values)
+ mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
+ transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
+ gazebo_depth[pixel[0],pixel[1]] = depth_value
+ transformed_gazebo_seg[pixel[0],pixel[1]] = 255
+ gazebo_depth_after= cv2.dilate(gazebo_depth,(3,3),iterations=3)
+ gazebo_depth_after = cv2.erode(gazebo_depth_after,(3,3),iterations=3)
+ transformed_gazebo_seg_after= cv2.dilate(transformed_gazebo_seg,(3,3),iterations=3)
+ transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,(3,3),iterations=3)
+ transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
+ transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
+ #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
+ #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
+ inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
+ transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
+ return transformed_gazebo_rgb,gazebo_depth_after
+
+ def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
+ if(self.is_ready_):
+ if(self.camera_intrinsic_matrix_ is None):
+ return
+ all_pixels,depth_data = self.getPixels(msg)
+ mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
+ clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
+ depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
+ clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
+ white_color = (255,255,255)
+ i = 0
+ block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
+ for coord in all_pixels:
+ x,y = coord
+ mask_image[round(x), round(y)] = 255
+ depth_image[round(x), round(y)] = depth_data[i]
+ if(block_background_mask[round(x),round(y)]):
+ clean_mask_image[round(x),round(y)] = 255
+ clean_depth_image[round(x),round(y)] = depth_data[i]
+ i += 1
+ cv2.imwrite('clean_mask_image.png',clean_mask_image)
+ cv2.imwrite('mask_image.png',mask_image)
+ cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
+ mask_image = clean_mask_image
+ depth_image = clean_depth_image
+ if(self.original_image_ is not None):
+ mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
+ gazebo_masked_image = np.zeros_like(self.original_image_)
+ gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
+ cv2.imwrite('original_image.png',self.original_image_)
+ cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
+ self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
+                return  # NOTE: the code below is an older offline debug path, kept for reference
+ np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
+ old_mask_image = mask_image
+ mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
+ _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
+ if(self.original_image_ is not None):
+ mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
+ im_floodfill = mask_image.copy()
+ (h,w) = mask_image.shape
+ mask = np.zeros((h+2,w+2),np.uint8)
+ cv2.floodFill(im_floodfill,mask,(0,0),255)
+ im_floodfill_inv = cv2.bitwise_not(im_floodfill)
+ im_out = mask_image | im_floodfill_inv
+ mask_image = im_out
+ # Create a new image with the same size as the original image
+ gazebo_masked_image = np.zeros_like(self.original_image_)
+
+ # Apply the gazebo_mask to the original image using element-wise multiplication
+ gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
+ gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
+ cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
+ cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
+ #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
+ ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
+ self.full_mask_image_publisher_.publish(ros_mask_image)
+ self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
+
+ def setupMesh(self,filename,link_name,rpy_str,xyz_str):
+ if(self.is_ready_):
+            # URDF rpy is the fixed-axis XYZ convention, applied as R = Rz @ Ry @ Rx (see eulerToR)
+ mesh_scene = trimesh.load(filename)
+ if(link_name == "panda_link6"):
+ meshes = mesh_scene.geometry
+ for mesh in meshes:
+ if(mesh != 'Shell006_000-mesh'):
+ vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
+ triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
+ meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+ #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
+ mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
+ for g in meshes.values()))
+
+ mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
+ for g in mesh_scene.geometry.values()))
+ # Convert Trimesh to Open3D TriangleMesh
+ vertices = o3d.utility.Vector3dVector(mesh.vertices)
+ triangles = o3d.utility.Vector3iVector(mesh.faces)
+ open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ test_pcd_data = np.asarray(test_pcd.points)
+
+ diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
+            # Unit heuristic: an extent under 1 means the mesh is in meters, so scale it to mm (everything is converted back to meters later)
+
+ if(diff < 1):
+ open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
+ else:
+ R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
+ open3d_mesh.rotate(R,[0,0,0])
+ test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
+ #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
+ #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
+ rpy_str_list = rpy_str.split()
+ rpy_floats = [float(x) for x in rpy_str_list]
+ rpy_np = np.array(rpy_floats)
+ xyz_str_list = xyz_str.split()
+ xyz_floats = [float(x) for x in xyz_str_list]
+ if(link_name == "panda_link1"):
+ xyz_floats = [0, 0, -0.1929999099]
+ elif(link_name == "panda_link3"):
+ xyz_floats = [0,0,-0.1219998142]
+ elif(link_name == "panda_link5"):
+ xyz_floats = [0,0,-0.2630000007]
+ elif(link_name == "panda_link7"):
+ rpy_np = np.array([0,0,-math.pi/4])
+ xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
+ xyz_np = 1000 * np.array(xyz_floats)
+ R2 = self.eulerToR(rpy_np)
+ # Create a 4x4 identity matrix
+ transform_matrix = np.eye(4)
+
+ # Replace the top-left 3x3 submatrix with the rotation matrix
+ transform_matrix[:3, :3] = R2
+
+ # Set the first three elements in the fourth column to the translation vector
+ transform_matrix[:3, 3] = xyz_np
+
+ # Set the bottom-right element to 1
+ transform_matrix[3, 3] = 1.0
+ open3d_mesh.transform(transform_matrix)
+
+ transform = self.fks_[link_name]
+
+ position = Vector3()
+ quaternion = Quaternion()
+ position.x = transform.pos[0]
+ position.y = transform.pos[1]
+ position.z = transform.pos[2]
+
+ quaternion.w = transform.rot[0]
+ quaternion.x = transform.rot[1]
+ quaternion.y = transform.rot[2]
+ quaternion.z = transform.rot[3]
+
+ robot_fk = self.transformStampedToMatrix(quaternion,position)
+ t_matrix = self.camera_to_world_ @ robot_fk
+ open3d_mesh.transform(t_matrix)
+ return open3d_mesh
+
+        # Unreachable fallback kept for reference: TF-based mesh placement (requires the camera to be in the Gazebo scene)
+ while True:
+ try:
+ t = self.tf_buffer_.lookup_transform(
+ "camera_color_optical_frame",
+ link_name,
+ rclpy.time.Time(),
+ )
+ t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
+ open3d_mesh.transform(t_matrix)
+ return open3d_mesh
+ except TransformException as ex:
+ #TODO Change This
+ pass
+
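+    # Quaternion (w,x,y,z) -> 4x4 homogeneous transform; the translation is scaled
+    # from meters to millimeters to match the mesh units used in setupMesh.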
+ def transformStampedToMatrix(self,rotation,translation):
+ if(self.is_ready_):
+ q0 = rotation.w
+ q1 = rotation.x
+ q2 = rotation.y
+ q3 = rotation.z
+ # First row of the rotation matrix
+ r00 = 2 * (q0 * q0 + q1 * q1) - 1
+ r01 = 2 * (q1 * q2 - q0 * q3)
+ r02 = 2 * (q1 * q3 + q0 * q2)
+
+ # Second row of the rotation matrix
+ r10 = 2 * (q1 * q2 + q0 * q3)
+ r11 = 2 * (q0 * q0 + q2 * q2) - 1
+ r12 = 2 * (q2 * q3 - q0 * q1)
+
+ # Third row of the rotation matrix
+ r20 = 2 * (q1 * q3 - q0 * q2)
+ r21 = 2 * (q2 * q3 + q0 * q1)
+ r22 = 2 * (q0 * q0 + q3 * q3) - 1
+ t_matrix = np.array(
+ [[r00, r01, r02,translation.x * 1000],
+ [r10, r11, r12,translation.y * 1000],
+ [r20, r21, r22,translation.z * 1000],
+ [0,0,0,1]]
+ )
+ return t_matrix
+
+ def setupMeshes(self,rgb,depth,segmentation):
+ if(self.is_ready_):
+ open3d_mesh = None
+ for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
+ if open3d_mesh is None:
+ open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
+ else:
+ open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
+ pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
+ pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
+ pcd_data = np.asarray(pcd.points)
+ np.save('panda_gazebo_pointcloud.npy',pcd_data)
+ point_cloud_msg = PointCloud2()
+ point_cloud_msg.header = Header()
+ point_cloud_msg.header.frame_id = "real_camera_link"
+ fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
+ PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
+ PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
+ ]
+ point_cloud_msg.height = 1
+ point_cloud_msg.width = len(pcd_data)
+ point_cloud_msg.fields = fields
+ point_cloud_msg.is_bigendian = False
+ point_cloud_msg.point_step = 3 * 4
+ point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
+ point_cloud_msg.is_dense = True
+ point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
+ self.full_publisher_.publish(point_cloud_msg)
+ self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
+
+ def eulerToR(self,rpy_np):
+ if(self.is_ready_):
+ rotation_x = rpy_np[0]
+ rotation_y = rpy_np[1]
+ rotation_z = rpy_np[2]
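+            # URDF-style fixed-axis convention: R = Rz(yaw) @ Ry(pitch) @ Rx(roll)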
+ Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
+ Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
+ Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
+ R = np.matmul(Rz,Ry)
+ R = np.matmul(R,Rx)
+ return R
+
+ def replace_nan_with_neighbors_average(self,array):
+ def replace_function(subarray):
+ center = subarray[4]
+            if(not np.isnan(center)):  # `center != np.nan` is always True, NaN never equals itself
+                return center
+            valid_values = subarray[~np.isnan(subarray)]
+ if valid_values.size == 0:
+ return np.nan
+ else:
+ return np.nanmean(valid_values)
+
+ return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
+
+ def listenerCallback(self,msg):
+ if self.is_ready_:
+ self.offline_ = True
+ start_time = time.time()
+ # path_array = msg.rgb.split('/')
+ # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
+ # if not os.path.exists(online_input_folder):
+ # os.makedirs(online_input_folder)
+ # online_input_num_folder = online_input_folder + '/' + path_array[-2]
+ # if not os.path.exists(online_input_num_folder):
+ # os.makedirs(online_input_num_folder)
+ # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ rgb_image = cv2.imread(msg.rgb)
+ # cv2.imwrite(input_rgb_path,rgb_image)
+ rgb_array = rgb_image.flatten().tolist()
+ depth_image = np.load(msg.depth)
+ # path_array = msg.depth.split('/')
+ # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # np.save(input_depth_path,depth_image)
+ depth_array = depth_image.flatten().tolist()
+ self.seg_file_ = msg.segmentation
+ segmentation_image = cv2.imread(msg.segmentation)
+ # path_array = msg.segmentation.split('/')
+ # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # cv2.imwrite(input_segmentation_path,segmentation_image)
+ segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
+ joint_array = np.load(msg.joints).tolist()
+ # path_array = msg.joints.split('/')
+ # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # np.save(input_joints_path,np.array(joint_array))
+ input_file_robosuite_data_msg = InputFilesRobosuiteData()
+ input_file_robosuite_data_msg.rgb = rgb_array
+ input_file_robosuite_data_msg.depth_map = depth_array
+ input_file_robosuite_data_msg.segmentation = segmentation_ros_image
+ input_file_robosuite_data_msg.joints = joint_array
+ self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
+            return  # NOTE: the code below is the older direct-IK path, kept for reference
+ rgb = msg.rgb
+ depth = msg.depth
+ segmentation = msg.segmentation
+ joints = msg.joints
+ joint_array = np.load(joints)
+ gripper = joint_array[-1]
+ joint_array = joint_array[:-1]
+
+ ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+ self.q_init_ = qout
+ # Hardcoded gripper
+ qout_list = qout.tolist()
+ panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
+ qout_list.append(panda_gripper_command)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.joint_commands_callback(qout_msg)
+ self.setupMeshes(rgb,depth,segmentation)
+ end_time = time.time()
+
+ def listenerCallbackOnlineDebug(self,msg):
+ if(len(msg.data_pieces) == 2):
+ start_time = time.time()
+ self.i_ += 1
+ online_input_folder = 'offline_ur5e_input'
+ if not os.path.exists(online_input_folder):
+ os.makedirs(online_input_folder)
+ online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
+ if not os.path.exists(online_input_num_folder):
+ os.makedirs(online_input_num_folder)
+ left_msg = msg.data_pieces[0]
+ right_msg = msg.data_pieces[1]
+        elif(len(msg.data_pieces) == 1):
+            start_time = time.time()
+            left_msg = msg.data_pieces[0]
+            right_msg = msg.data_pieces[0]  # single-view input: reuse the only image so the right-view code below stays defined
+        else:
+            return
+
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ if(left_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
+ left_rgb_np = cv2.flip(left_rgb_np,0)
+
+ right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
+
+ if(right_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
+
+ panda_panda_gripper_joints = np.array(msg.data_pieces[0].joints)
+ real_ee_gripper_joint = panda_panda_gripper_joints[-1]
+
+ panda_panda_gripper_joints = panda_panda_gripper_joints[:-1]
+ ee_pose = self.panda_panda_gripper_solver_.fk(panda_panda_gripper_joints)
+ end_effector_rotation_with_no_translation = np.array([[0,1,0,0],
+ [-1,0,0,0],
+ [0,0,1,0],
+ [0,0,0,1]])
+ ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+        scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+        scipy_quaternion = scipy_rotation.as_quat()
+        if(self.first_time_):
+            self.q_init_ = panda_panda_gripper_joints
+            self.first_time_ = False
+        qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+ b_xyz = 1e-5
+ b_rpy = 1e-3
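+        # Progressively relax the position/orientation tolerances by 10x until
+        # TRAC-IK finds a solution or the bounds become unreasonably large.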
+ while(qout is None):
+ b_xyz *= 10
+ b_rpy *= 10
+ qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=panda_panda_gripper_joints,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+            if(b_xyz >= 0.1):  # float-safe stop condition (== could miss due to rounding)
+ print("Couldn't find good IK")
+ qout = panda_panda_gripper_joints
+ print("Bound xyz: " + str(b_xyz))
+ self.q_init_ = qout
+ panda_arm_joints = panda_panda_gripper_joints.tolist()
+ panda_gripper_joint = -0.04*real_ee_gripper_joint + 0.04
+ panda_gripper_joints = [panda_gripper_joint] * 2
+ panda_ur5_arm_joints = qout.tolist()
+ panda_ur5_gripper_joint = -20 * panda_gripper_joint + 0.8
+ panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
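+        # The linear maps above translate commands between gripper conventions;
+        # the constants appear to be empirically calibrated for these grippers.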
+ full_joint_list = []
+
+ full_joint_list.extend(panda_arm_joints)
+ full_joint_list.extend(panda_ur5_arm_joints)
+ full_joint_list.extend(panda_gripper_joints)
+ full_joint_list.extend(panda_ur5_gripper_joints)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = full_joint_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.left_real_rgb_ = self.cv_bridge_.cv2_to_imgmsg(left_rgb_np)
+ self.right_real_rgb_ = self.cv_bridge_.cv2_to_imgmsg(right_rgb_np)
+
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
+ self.real_qout_list_ = full_joint_list
+ end_time = time.time()
+ print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
+ # else:
+ # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
+ # print("WARNING HIT IK ON FRANKA ERROR")
+
+
+ def listenerCallbackData(self,msg):
+ if self.is_ready_:
+ start_time = time.time()
+ segmentation_data = msg.segmentation
+
+ segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
+
+ rgb = msg.rgb
+ rgb = np.array(rgb) \
+ .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
+
+            depth_map = msg.depth_map
+            depth_map = np.array(depth_map) \
+                .reshape(segmentation_data.shape)
+
+ joint_array = msg.joints
+ gripper = joint_array[-1]
+ joint_array = joint_array[:-1]
+
+ ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+ self.q_init_ = qout
+
+ # Hardcoded gripper
+ qout_list = qout.tolist()
+ panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
+ qout_list.append(panda_gripper_command)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.joint_commands_callback(qout_msg)
+ self.setupMeshes(rgb, depth_map, segmentation_data)
+ # end_time = time.time()
+ # print("Total time: " + str(end_time - start_time) + " s")
+
+
+ def joint_commands_callback(self, msg):
+ self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
+ #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
+ self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
+        msg.data.append(msg.data[-1])  # duplicate the gripper command so both finger joints receive the same value
+ self.joint_state_msg.position = msg.data
+ self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
+ self.fks_ = self.chain_.forward_kinematics(self.thetas_)
+ self.panda_joint_state_publisher_.publish(self.joint_state_msg)
+
+
+
+
+def main(args=None):
+ rclpy.init(args=args)
+
+ write_data = WriteData()
+
+ rclpy.spin(write_data)
+
+ # Destroy the node explicitly
+ # (optional - otherwise it will be done automatically
+ # when the garbage collector destroys the node object)
+ write_data.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_to_ur5_no_gripper_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_to_ur5_no_gripper_real_better.py
deleted file mode 100644
index 3532412..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_to_ur5_no_gripper_real_better.py
+++ /dev/null
@@ -1,1042 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-import pathlib
-
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = False
- file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"panda_link0","panda_link8")
- self.ur5_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/ur5_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
- self.ur5_solver_ = TracIKSolver(self.ur5_urdf_,"base_link","wrist_3_link")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
- self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
- self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
- self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.1)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- '/input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRealData,
- 'input_files_data_real',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- #Harcoding start position
- self.q_init_ = np.array([0,0,0,0,0,0])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
- self.real_seg_ = None
- self.real_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # REMEMBER TO HARDCODE THIS
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
-
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
- 1
- )
-
- self.ur5_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
- 1
- )
-
- self.panda_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
- 1
- )
-
- self.panda_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
- 1
- )
-
- self.joint_subscription_ = self.create_subscription(
- JointState,
- '/gazebo_joint_states',
- self.noTimeGazeboCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
-
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
-
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
-
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
-
- def noTimeGazeboCallback(self,joint_msg):
- # No-op in this node; the time-synchronized gazeboCallback performs the inpainting
- return
-
- def gazeboCallback(self,joints,gazebo_rgb,gazebo_depth,real_rgb,real_depth):
-
- start_time = time.time()
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # URDF rpy is fixed-axis XYZ, i.e. applied as Rz @ Ry @ Rx (see eulerToR)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Check whether the mesh is authored in meters: if its extent is under 1 unit, scale it to millimeters (everything is converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
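- # A minimal sketch of the transform assembly above, with hypothetical values
- # (rpy/xyz as they would appear in a URDF <origin> tag):
- #   rpy_np = np.array([0.0, 0.0, math.pi/2]); xyz_np = 1000 * np.array([0.1, 0.0, 0.2])
- #   T = np.eye(4); T[:3, :3] = self.eulerToR(rpy_np); T[:3, 3] = xyz_np
- # open3d_mesh.transform(T) then rotates the mesh about the origin and offsets
- # it in millimeters, the unit used for all meshes in this node.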
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[1],self.image_shape_[0])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 matrix taking world-frame
- coordinates to pixel coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogeneous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogeneous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
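- # Usage sketch: getPixels pads the 3x4 intrinsic matrix to 4x4 and calls this
- # helper, e.g.
- #   K_h = np.vstack((self.camera_intrinsic_matrix_, np.array([0, 0, 0, 1]).reshape(1, 4)))
- #   pix = self.project_points_from_world_to_camera(pts, K_h, H, W)
- # which yields integer (row, col) indices clipped to the image bounds; with
- # pixel_to_point_dicts=True the forward and inverse point<->pixel maps used
- # by transformGazeboImage are returned as well.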
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
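- # Worked example: for a frame with min depth 0.5 m and max 2.5 m, a pixel at
- # 1.5 m maps to (1.5 - 0.5) / (2.5 - 0.5) = 0.5, i.e. intensity 127. The
- # scaling is per-image, so values are only comparable within a single frame.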
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
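- # The loop above is the standard pinhole projection u = fx*x/z + cx,
- # v = fy*y/z + cy; e.g. with fx = fy ~ 101.4 and cx = cy = 42 (the robosuite
- # intrinsics above), the point (0.1, 0.0, 1.0) lands at (u, v) = (52, 42)
- # and writes depth 1.0 into that pixel.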
-
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- depth_np = depth
- seg = seg_file
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- real_depth_image_unmasked = depth_np
- real_rgb_image_unmasked = rgb_np
- real_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
- inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- cv2.imwrite('dilate_real_rgb_image_masked.png',real_rgb_image_masked)
- cv2.imwrite('dilate_real_segmentation_mask_255.png',real_segmentation_mask_255)
- cv2.imwrite('dilate_real_rgb_image_masked_inpaint.png',real_rgb_image_masked_inpaint)
- cv2.imwrite('diff.png',abs(real_rgb_image_masked-real_rgb_image_masked_inpaint))
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- # gazebo_robot_only_lab[:,:,0] += 50
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- cv2.imwrite('attempt.png',attempt)
- cv2.imwrite('attempt2.png',attempt2)
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- cv2.imwrite('inpaint.png',image_8bit)
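- # The composite is a per-pixel depth test: joined_depth_argmin is 0 where the
- # rendered (Gazebo) robot is nearer and 1 where the masked real background
- # wins, so attempt + attempt2 overlays the simulated robot onto the real
- # scene with the original robot already inpainted away.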
- if self.offline_:
- if not os.path.exists(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results'):
- os.makedirs(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results')
-
- last_slash_index = self.seg_file_.rfind('/')
- underscore_before_last_slash_index = self.seg_file_.rfind('_', 0, last_slash_index)
- str_num = self.seg_file_[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
- mask_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/mask' + str_num +'.png'
- attempt_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/attempt' + str_num +'.png'
- cv2.imwrite(inpaint_file,image_8bit)
- cv2.imwrite(mask_file,gazebo_segmentation_mask_255)
- cv2.imwrite(attempt_file,attempt)
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(gazebo_segmentation_mask_255,encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- self.mask_image_publisher_.publish(mask_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- # NOTE: unreachable legacy path below (the method returns above); pointcloud_msg is not defined in this scope
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) # 0 marks pixels with no projected depth (the old "* 1000" was a no-op on zeros)
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- # Morphological close (dilate then erode) with an explicit 3x3 kernel; cv2 expects an ndarray kernel, not a tuple
- gazebo_depth_after = cv2.dilate(gazebo_depth,np.ones((3,3),np.uint8),iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,np.ones((3,3),np.uint8),iterations=3)
- transformed_gazebo_seg_after = cv2.dilate(transformed_gazebo_seg,np.ones((3,3),np.uint8),iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,np.ones((3,3),np.uint8),iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy is fixed-axis XYZ, i.e. applied as Rz @ Ry @ Rx (see eulerToR)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Check whether the mesh is authored in meters: if its extent is under 1 unit, scale it to millimeters (everything is converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # NOTE: unreachable fallback (the method returns above); requires the camera frame to be present in the Gazebo TF tree
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- # TODO: back off or log instead of spinning on repeated lookup failures
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
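- # Sanity check for the quaternion-to-matrix formula above: the identity
- # quaternion (w=1, x=y=z=0) gives r00 = r11 = r22 = 1 with zero off-diagonal
- # terms, so t_matrix reduces to a pure translation (scaled to millimeters to
- # match the mesh units).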
-
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
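- # Layout note: three float32 fields at offsets 0/4/8 give point_step = 12
- # bytes; height == 1 marks an unorganized cloud, so row_step spans every
- # point in the message.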
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
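- # Example: rpy = (0, 0, pi/2) yields R = Rz(pi/2), mapping x -> y and
- # y -> -x; the composition R = Rz @ Ry @ Rx matches the URDF fixed-axis
- # XYZ (extrinsic) convention noted in prelimMeshFast.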
-
- def replace_nan_with_neighbors_average(self,array):
- def replace_function(subarray):
- center = subarray[4]
- if(not np.isnan(center)):
- return center
- valid_values = subarray[~np.isnan(subarray)]
- if valid_values.size == 0:
- return np.nan
- else:
- return np.nanmean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
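- # generic_filter slides a 3x3 window across the array (mode='constant' with
- # cval=np.nan pads the border), so each NaN is replaced by the mean of its
- # non-NaN neighbors; a lone NaN surrounded by 1.0s becomes 1.0, while a
- # fully-NaN neighborhood stays NaN.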
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
- # Hardcoded linear map from the UR5 gripper joint to the Panda finger command
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
-
- def listenerCallbackOnlineDebug(self,msg):
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- cv2.imwrite(online_input_num_folder+'/rgb.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- panda_joints = np.array(msg.joints)
- ee_pose = self.panda_solver_.fk(panda_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.ur5_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.ur5_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz >= 0.01): # avoid exact float equality on the escalated bound
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- #qout[0] -= math.pi
- self.q_init_ = qout
- # Concatenate the UR5 solution with the Panda joints for the combined /joint_commands message
- qout_list = qout.tolist()
- qout_list.extend(panda_joints)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
- self.real_qout_list_ = qout_list
- # else:
- # qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded linear map from the UR5 gripper joint to the Panda finger command
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
- msg.data.append(msg.data[-1]) # mirror the last value so both panda_finger_joints receive the same command
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better.py
deleted file mode 100644
index 244a3c9..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better.py
+++ /dev/null
@@ -1,3580 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-import pathlib
-
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = True
- self.float_image_ = False
- file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
- self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
- self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
- self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
- self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
- self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- '/input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRealDataMulti,
- 'input_files_data_real',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- # Hardcoding the start position
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.left_real_rgb_ = None
- self.right_real_rgb_ = None
- self.left_real_depth_ = None
- self.right_real_depth_ = None
- self.real_seg_ = None
- self.real_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # NOTE: hardcoded; must match the robosuite camera resolution
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
-
- self.ur5_left_rgb_ = None
- self.ur5_left_depth_ = None
- self.panda_left_rgb_ = None
- self.panda_left_depth_ = None
- self.ur5_left_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_left_camera/image_raw',
- self.ur5LeftRgbCallback,
- 1
- )
-
- self.ur5_left_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_left_camera/depth/image_raw',
- self.ur5LeftDepthCallback,
- 1
- )
-
- self.panda_left_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_left_camera/image_raw',
- self.pandaLeftRgbCallback,
- 1
- )
-
- self.panda_left_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_left_camera/depth/image_raw',
- self.pandaLeftDepthCallback,
- 1
- )
-
- self.ur5_right_rgb_ = None
- self.ur5_right_depth_ = None
- self.panda_right_rgb_ = None
- self.panda_right_depth_ = None
- self.ur5_right_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_right_camera/image_raw',
- self.ur5RightRgbCallback,
- 1
- )
-
- self.ur5_right_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_right_camera/depth/image_raw',
- self.ur5RightDepthCallback,
- 1
- )
-
- self.panda_right_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_right_camera/image_raw',
- self.pandaRightRgbCallback,
- 1
- )
-
- self.panda_right_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_right_camera/depth/image_raw',
- self.pandaRightDepthCallback,
- 1
- )
-
- self.joint_subscription_ = self.create_subscription(
- JointState,
- '/gazebo_joint_states',
- self.noTimeGazeboCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def ur5LeftRgbCallback(self,msg):
- self.ur5_left_rgb_ = msg
-
- def ur5LeftDepthCallback(self,msg):
- self.ur5_left_depth_ = msg
-
- def pandaLeftRgbCallback(self,msg):
- self.panda_left_rgb_ = msg
-
- def pandaLeftDepthCallback(self,msg):
- self.panda_left_depth_ = msg
-
- def ur5RightRgbCallback(self,msg):
- self.ur5_right_rgb_ = msg
-
- def ur5RightDepthCallback(self,msg):
- self.ur5_right_depth_ = msg
-
- def pandaRightRgbCallback(self,msg):
- self.panda_right_rgb_ = msg
-
- def pandaRightDepthCallback(self,msg):
- self.panda_right_depth_ = msg
-
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- left_inpainted_image = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_)
- right_inpainted_image = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_)
- inpainted_image_msg = MultipleInpaintImages()
- inpainted_image_msg.images = [left_inpainted_image,right_inpainted_image]
- self.inpainted_publisher_.publish(inpainted_image_msg)
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('left_inpainting'):
- os.makedirs('left_inpainting')
- if not os.path.exists('right_inpainting'):
- os.makedirs('right_inpainting')
- cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
- cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
-
- def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- return self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
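- # Assumption: both arms are rendered in an otherwise empty Gazebo world, so a
- # plain depth threshold (< 8 m) doubles as a robot segmentation mask before
- # handing the frames to dummyInpainting.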
-
- def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
- # Disabled: noTimeGazeboCallback drives the inpainting in this node
- return
- start_time = time.time()
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # URDF rpy is fixed-axis XYZ, i.e. applied as Rz @ Ry @ Rx (see eulerToR)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Check whether the mesh is authored in meters: if its extent is under 1 unit, scale it to millimeters (everything is converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[1],self.image_shape_[0])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 matrix to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogeneous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogeneous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Find the minimum and maximum depth values in the image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Scale to [0, 255] and convert to uint8 for visualization
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
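- # Pinhole projection: u = fx * x / z + cx, v = fy * y / z + cy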
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
- rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
- if(rgb.dtype == np.float32):
- rgb = (rgb * 255).astype(np.uint8)
- # rgb = cv2.resize(rgb,(128,128))
- # seg_file = cv2.resize(seg_file,(128,128))
- # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
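- # Binarize the gazebo robot mask; the eroded inverse is used to extract the robot outline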
- _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
- gazebo_segmentation_mask_255 = gazebo_seg
- inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
- cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
- outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
- # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
- #gazebo_robot_only_lab[:,:,0] += 10
- #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
- #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- cv2.imwrite('gazebo_robot_only.png',gazebo_only)
- background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
- inverted_seg_file_original = cv2.bitwise_not(seg_file)
- #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
- inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
- #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
- background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
- cv2.imwrite('background_only.png',background_only)
- inpainted_image = gazebo_only + background_only
- #cv2.imwrite('no_fill.png',inpainted_image)
-
- better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
- cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
- cv2.imwrite('pre_inpainted_image.png',inpainted_image)
- cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
-
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- inpainted_image = cv2_inpaint_image  # overrides the flat-color fill above with the Telea inpainting result
- cv2.imwrite('og_inpainted_image.png',inpainted_image)
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
- if(self.float_image_):
- inpainted_image = (inpainted_image / 255.0).astype(np.float32)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
- inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
- mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
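- # Publish the same inpainted frame in both slots of the MultipleInpaintImages message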
- inpainted_image_msgs = MultipleInpaintImages()
- inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
- self.inpainted_publisher_.publish(inpainted_image_msgs)
- self.mask_image_publisher_.publish(mask_image_msg)
-
-# better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
-# better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
-# target_color = (39,43,44)
-# target_mask = np.zeros_like(inpainted_image)
-# target_mask[:,:] = target_color
-# got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
-# #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
-# image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
-# image_8bit = got_away
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
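- # Currently disabled: the early return below skips the depth-based compositing pipeline that follows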
- return
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- depth_np = depth
- seg = seg_file
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- #rgb_np = cv2.resize(rgb_np,(128,128))
- #depth_np = cv2.resize(depth_np,(128,128))
- #seg = cv2.resize(seg,(128,128))
- #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
- real_depth_image_unmasked = depth_np
- real_rgb_image_unmasked = rgb_np
- real_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
- inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
- outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
-
- # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
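- # Stack gazebo and real depth maps; zero (empty) pixels get sentinel depths so the per-pixel argmin selects which layer is visible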
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- real_rgb_image_masked_inpaint = real_rgb_image_masked
-
-
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 100
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
-
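- # Composite: real pixels where the real surface wins the depth test, gazebo robot pixels elsewhere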
- inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
-
- better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
- better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- image_8bit = got_away
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
- # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- self.mask_image_publisher_.publish(mask_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))  # zero marks pixels with no projected depth
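- # For every robosuite pixel, average the colors of all gazebo pixels that share its 3D points and record the mean depth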
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- kernel = np.ones((3,3),np.uint8)  # 3x3 structuring element; dilate then erode closes small reprojection holes
- gazebo_depth_after = cv2.dilate(gazebo_depth,kernel,iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,kernel,iterations=3)
- transformed_gazebo_seg_after = cv2.dilate(transformed_gazebo_seg,kernel,iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,kernel,iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0).astype(np.uint8)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy angles compose as Rz @ Ry @ Rx (fixed-axis XYZ, i.e. intrinsic ZYX)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
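- # Pack the sampled points into an unorganized XYZ PointCloud2 (three float32 fields, 12-byte point step)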
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
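- # Compose R = Rz @ Ry @ Rx, matching the URDF fixed-axis rpy convention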
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def replace_nan_with_neighbors_average(self,array):
- def replace_function(subarray):
- center = subarray[4]
- if(not np.isnan(center)):  # NaN never compares equal to itself, so use isnan
- return center
- valid_values = subarray[~np.isnan(subarray)]
- if valid_values.size == 0:
- return np.nan
- else:
- return np.nanmean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
-
- def listenerCallbackOnlineDebug(self,msg):
- if(len(msg.data_pieces) == 2):
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
- left_msg = msg.data_pieces[0]
- right_msg = msg.data_pieces[1]
- msg = msg.data_pieces[0]
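- # The bookkeeping below repeats the block above, so self.i_ advances twice per message and int(self.i_ / 2) names one folder per message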
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
- if(left_rgb_np.dtype == np.float32):
- self.float_image_ = True
- left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
- left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
- left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
- left_depth_np[np.isnan(left_depth_np)] = 0
-
- right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
- if(right_rgb_np.dtype == np.float32):
- self.float_image_ = True
- right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
- right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
- right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
- right_depth_np[np.isnan(right_depth_np)] = 0
-
- panda_ur5_gripper_joints = np.array(left_msg.joints)
- panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
- ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
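- # Right-multiply a constant 90-degree rotation about the end-effector z axis (no translation)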
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
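- # If IK fails, relax the TRAC-IK position/orientation tolerances by 10x per attempt, giving up once the position bound reaches 1 cm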
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz == 0.01):
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- self.q_init_ = qout
- panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
- panda_ur5_gripper_joint = 0.0
- panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
- panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
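- # Only the arm joints are sent below; the gripper joint lists above are not appended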
- full_joint_list = []
-
- full_joint_list.extend(panda_arm_joints)
- full_joint_list.extend(panda_ur5_arm_joints)
- qout_msg = Float64MultiArray()
- qout_msg.data = full_joint_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.left_real_rgb_ = left_msg.rgb
- self.right_real_rgb_ = right_msg.rgb
-
- self.left_real_depth_ = left_depth_np
- self.right_real_depth_ = right_depth_np
- self.real_qout_list_ = full_joint_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
- # else:
- # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
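- # Duplicate the last command so both panda finger joints receive the same value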
- msg.data.append(msg.data[-1])
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
- #!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-import pathlib
-
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = True
- self.float_image_ = False
- file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
- self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
- self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
- self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
- self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
- self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- '/input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRealDataMulti,
- 'input_files_data_real',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- # Hardcoding the start position
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
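- # Expand the xacro to a URDF string by running the xacro CLI and capturing its stdout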
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
- self.real_seg_ = None
- self.real_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # REMEMBER TO HARDCODE THIS
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
-
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
- 1
- )
-
- self.ur5_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
- 1
- )
-
- self.panda_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
- 1
- )
-
- self.panda_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
- 1
- )
-
- self.joint_subscription_ = self.create_subscription(
- JointState,
- '/gazebo_joint_states',
- self.noTimeGazeboCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
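- # Strip the 7-character URI scheme prefix (e.g. "file://") to get a filesystem path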
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
-
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
-
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
-
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
-
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
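- # Segment the robot by depth: pixels closer than 8 m are treated as robot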
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
-
- def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
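- # Currently disabled: the immediate return below leaves this synchronized path unused; noTimeGazeboCallback handles inpainting instead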
- return
- start_time = time.time()
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # URDF rpy angles compose as Rz @ Ry @ Rx (fixed-axis XYZ, i.e. intrinsic ZYX)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[1],self.image_shape_[0])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 matrix to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogeneous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogeneous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Find the minimum and maximum depth values in the image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Scale to [0, 255] and convert to uint8 for visualization
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
- rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
- if(rgb.dtype == np.float32):
- rgb = (rgb * 255).astype(np.uint8)
- # rgb = cv2.resize(rgb,(128,128))
- # seg_file = cv2.resize(seg_file,(128,128))
- # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
- gazebo_segmentation_mask_255 = gazebo_seg
- inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
- cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
- outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
- # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
- #gazebo_robot_only_lab[:,:,0] += 10
- #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
- #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- cv2.imwrite('gazebo_robot_only.png',gazebo_only)
- background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
- inverted_seg_file_original = cv2.bitwise_not(seg_file)
- #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
- inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
- #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
- background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
- cv2.imwrite('background_only.png',background_only)
- inpainted_image = gazebo_only + background_only
- #cv2.imwrite('no_fill.png',inpainted_image)
-
- better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
- cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
- cv2.imwrite('pre_inpainted_image.png',inpainted_image)
- cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
-
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- inpainted_image = cv2_inpaint_image  # keep the TELEA result; the flat-color fill above is discarded
- cv2.imwrite('og_inpainted_image.png',inpainted_image)
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
- if(self.float_image_):
- inpainted_image = (inpainted_image / 255.0).astype(np.float32)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
- inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
- mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
- inpainted_image_msgs = MultipleInpaintImages()
- inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
- self.inpainted_publisher_.publish(inpainted_image_msgs)
- self.mask_image_publisher_.publish(mask_image_msg)
-
-# better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
-# better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
-# target_color = (39,43,44)
-# target_mask = np.zeros_like(inpainted_image)
-# target_mask[:,:] = target_color
-# got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
-# #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
-# image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
-# image_8bit = got_away
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- return  # NOTE: disabled; dummyInpainting performs the cross-painting instead
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- depth_np = depth
- seg = seg_file
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- #rgb_np = cv2.resize(rgb_np,(128,128))
- #depth_np = cv2.resize(depth_np,(128,128))
- #seg = cv2.resize(seg,(128,128))
- #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
- real_depth_image_unmasked = depth_np
- real_rgb_image_unmasked = rgb_np
- real_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
- inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
- outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
-
- # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
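- # argmin over the two depth layers: 0 where the gazebo robot is nearer (use the gazebo pixel), 1 where the real scene is nearer (keep the real pixel)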
-
- real_rgb_image_masked_inpaint = real_rgb_image_masked
-
-
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 100
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
-
- inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
-
- better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
- better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- image_8bit = got_away
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
- # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- self.mask_image_publisher_.publish(mask_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))  # unfilled pixels stay 0 and are treated as invalid depth downstream
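- # For each robosuite pixel, gather every 3D point that projects to it, then average the RGB of the corresponding gazebo pixels and the depth of the points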
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- kernel = np.ones((3,3),np.uint8)
- gazebo_depth_after = cv2.dilate(gazebo_depth,kernel,iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,kernel,iterations=3)
- transformed_gazebo_seg_after = cv2.dilate(transformed_gazebo_seg,kernel,iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,kernel,iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
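- # coord is (row, col): project_points_from_world_to_camera returns (height, width)-ordered indices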
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return  # the flood-fill masking path below is disabled
- np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
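- # Fill interior holes: flood fill the exterior from (0,0), invert, and OR the result back into the mask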
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy angles are fixed-axis X-Y-Z rotations, composed as R = Rz @ Ry @ Rx in eulerToR
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # If the mesh extent is under 1 unit, assume meters and scale to millimeters (converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
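- # Assemble the homogeneous transform; translation is converted from meters to millimeters to match the mesh units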
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
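- # 3 float32 fields (x, y, z): 12 bytes per point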
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
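- # Compose fixed-axis rotations in URDF order: R = Rz @ Ry @ Rx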
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def replace_nan_with_neighbors_average(self,array):
- def replace_function(subarray):
- center = subarray[4]
- if(not np.isnan(center)):
- return center
- valid_values = subarray[~np.isnan(subarray)]
- if valid_values.size == 0:
- return np.nan
- else:
- return np.nanmean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
- # Hardcoded linear map from the source gripper command to the panda finger joint
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
-
- def listenerCallbackOnlineDebug(self,msg):
- msg = msg.data_pieces[0]
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- if(rgb_np.dtype == np.float32):
- self.float_image_ = True
- rgb_np = (rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- if not os.path.exists('original_rgb'):
- os.makedirs('original_rgb')
- cv2.imwrite('original_rgb/rgb' + str(self.i_) +'.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- panda_ur5_gripper_joints = np.array(msg.joints)
- panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
- ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
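- # Constant 90-degree rotation about z with no translation, presumably to align the UR5 gripper frame with the panda end-effector convention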
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
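- # Relax the TRAC-IK translation (bx/by/bz) and rotation (brx/bry/brz) tolerances by 10x per attempt until IK succeeds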
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz >= 0.01):  # avoid exact float equality when deciding to stop relaxing
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- self.q_init_ = qout
- panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
- panda_ur5_gripper_joint = 0.0
- panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
- panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
- full_joint_list = []
-
- full_joint_list.extend(panda_arm_joints)
- full_joint_list.extend(panda_ur5_arm_joints)
- qout_msg = Float64MultiArray()
- qout_msg.data = full_joint_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
- self.real_qout_list_ = full_joint_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
- # else:
- # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded linear map from the source gripper command to the panda finger joint
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
- msg.data.append(msg.data[-1])
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
- #!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-import pathlib
-
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = True
- self.float_image_ = False
- file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
- self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
- self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
- self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
- self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
- self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
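- # Approximately synchronize the five Gazebo topics (queue depth 1, slop 0.2 s)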
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- '/input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRealDataMulti,
- 'input_files_data_real',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- # Hardcoded start position used as the IK seed
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
- self.real_seg_ = None
- self.real_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # NOTE: hardcoded to the 84x84 robosuite camera resolution
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
-
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
- 1
- )
-
- self.ur5_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
- 1
- )
-
- self.panda_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
- 1
- )
-
- self.panda_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
- 1
- )
-
- self.joint_subscription_ = self.create_subscription(
- JointState,
- '/gazebo_joint_states',
- self.noTimeGazeboCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
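- # Walk the URDF: for every link with a visual mesh, record (mesh filename, link name, rpy, xyz) to drive the point-cloud rendering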
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
-
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
-
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
-
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
-
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
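- # Segment by depth: pixels nearer than 8 m are taken to be robot; this presumes the Gazebo scene contains only the robot in front of an empty background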
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_  # swap in the latest source-robot frame from the policy; the Gazebo ur5 depth above only served to build the segmentation mask
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
-
- def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
- return  # disabled: noTimeGazeboCallback handles the Gazebo images instead
- start_time = time.time()
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # URDF rpy angles are fixed-axis X-Y-Z rotations, composed as R = Rz @ Ry @ Rx in eulerToR
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # If the mesh extent is under 1 unit, assume meters and scale to millimeters (converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[0],self.image_shape_[1])  # (height, width) order per the function signature
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 matrix to go from world (robot) coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogeneous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogeneous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / max(max_depth - min_depth, 1e-12)  # guard against a constant depth image
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
- rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
- if(rgb.dtype == np.float32):
- rgb = (rgb * 255).astype(np.uint8)
- # rgb = cv2.resize(rgb,(128,128))
- # seg_file = cv2.resize(seg_file,(128,128))
- # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
- gazebo_segmentation_mask_255 = gazebo_seg
- inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
- cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
- outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
- # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
- #gazebo_robot_only_lab[:,:,0] += 10
- #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
- #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- cv2.imwrite('gazebo_robot_only.png',gazebo_only)
- background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
- inverted_seg_file_original = cv2.bitwise_not(seg_file)
- #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
- inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
- #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
- background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
- cv2.imwrite('background_only.png',background_only)
- inpainted_image = gazebo_only + background_only
- #cv2.imwrite('no_fill.png',inpainted_image)
-
- better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
- cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
- cv2.imwrite('pre_inpainted_image.png',inpainted_image)
- cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
-
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- inpainted_image = cv2_inpaint_image  # keep the TELEA result; the flat-color fill above is discarded
- cv2.imwrite('og_inpainted_image.png',inpainted_image)
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
- if(self.float_image_):
- inpainted_image = (inpainted_image / 255.0).astype(np.float32)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
- inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
- mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
- inpainted_image_msgs = MultipleInpaintImages()
- inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
- self.inpainted_publisher_.publish(inpainted_image_msgs)
- self.mask_image_publisher_.publish(mask_image_msg)
-
-# better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
-# better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
-# target_color = (39,43,44)
-# target_mask = np.zeros_like(inpainted_image)
-# target_mask[:,:] = target_color
-# got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
-# #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
-# image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
-# image_8bit = got_away
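The path above composites the rendered Gazebo robot onto the real frame with bitwise masks and TELEA-inpaints whatever neither source covers. A minimal, self-contained sketch of that compositing step on synthetic frames (all array names here are illustrative, not from the node):

```python
import cv2
import numpy as np

# Synthetic stand-ins: a grey "real" frame and a rendered frame with a bright robot.
h, w = 64, 64
real_rgb = np.full((h, w, 3), 90, np.uint8)
gazebo_rgb = np.zeros((h, w, 3), np.uint8)
robot_mask = np.zeros((h, w), np.uint8)
cv2.rectangle(gazebo_rgb, (20, 10), (44, 54), (0, 200, 255), -1)
cv2.rectangle(robot_mask, (20, 10), (44, 54), 255, -1)
real_robot_mask = np.zeros((h, w), np.uint8)  # where the real robot was
cv2.rectangle(real_robot_mask, (16, 10), (40, 54), 255, -1)

# Background = real frame minus an eroded band around the real robot.
background_mask = cv2.erode(cv2.bitwise_not(real_robot_mask),
                            np.ones((3, 3), np.uint8), iterations=3)
background_only = cv2.bitwise_and(real_rgb, real_rgb, mask=background_mask)
robot_only = cv2.bitwise_and(gazebo_rgb, gazebo_rgb, mask=robot_mask)

# Composite the rendered robot over the background; inpaint pixels covered by neither.
composite = np.where(robot_mask[..., None] > 0, robot_only, background_only)
seam = cv2.bitwise_and(cv2.bitwise_not(robot_mask), cv2.bitwise_not(background_mask))
result = cv2.inpaint(composite, seam, 3, cv2.INPAINT_TELEA)
```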
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- return # short-circuit: this legacy inpainting path is disabled
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- depth_np = depth
- seg = seg_file
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- #rgb_np = cv2.resize(rgb_np,(128,128))
- #depth_np = cv2.resize(depth_np,(128,128))
- #seg = cv2.resize(seg,(128,128))
- #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
- real_depth_image_unmasked = depth_np
- real_rgb_image_unmasked = rgb_np
- real_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
- inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
- outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
-
- # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- real_rgb_image_masked_inpaint = real_rgb_image_masked
-
-
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 100
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
-
- inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
-
- better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
- better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- image_8bit = got_away
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
- # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- self.mask_image_publisher_.publish(mask_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
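The inpainting method above resolves occlusion by stacking the rendered depth and the masked real depth and taking a per-pixel argmin, so whichever surface is closer wins each pixel. A compact sketch of that test on synthetic data (shapes and values are illustrative):

```python
import numpy as np

h, w = 4, 4
rng = np.random.default_rng(0)
render_depth = rng.uniform(0.5, 2.0, (h, w))   # rendered robot depth, 0 = no hit
real_depth = rng.uniform(0.5, 2.0, (h, w))     # masked real depth, 0 = robot pixel removed
render_rgb = np.full((h, w, 3), 200, np.uint8)
real_rgb = np.full((h, w, 3), 50, np.uint8)

depths = np.stack([render_depth, real_depth])  # shape (2, H, W)
depths[depths == 0] = 1000.0                   # empty pixels never win the argmin

# 0 where the render is closer, 1 where the real scene is closer.
winner = np.argmin(depths, axis=0)
composite = np.where(winner[..., None] == 0, render_rgb, real_rgb)
```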
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
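custom_mode_filter above is a plain O(H·W·k²) Python loop, which is acceptable for small debug images. A tiny demonstration of what it computes at a single masked pixel (synthetic image):

```python
import numpy as np

img = np.zeros((5, 5, 3), np.uint8)
img[:, :3] = (10, 20, 30)   # left two thirds one color
img[:, 3:] = (200, 0, 0)    # right third another
pad = 1                     # kernel_size // 2 for a 3x3 window

i, j = 2, 2                 # the one masked pixel we replace
neighborhood = img[i - pad:i + pad + 1, j - pad:j + pad + 1]
colors, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
print(colors[np.argmax(counts)])  # -> [10 20 30], the most frequent neighbor color
```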
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) # unwritten pixels stay 0 and are pushed to far range by the caller
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- gazebo_depth_after= cv2.dilate(gazebo_depth,(3,3),iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,(3,3),iterations=3)
- transformed_gazebo_seg_after= cv2.dilate(transformed_gazebo_seg,(3,3),iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,(3,3),iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
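transformGazeboImage reprojects one camera-frame point cloud through two different intrinsic matrices and matches pixels across the two views. A minimal pinhole-projection sketch under the same assumptions (a 3x3 intrinsics matrix K and camera-frame points with z forward); project_points_from_world_to_camera in the node presumably does something similar with extra bookkeeping:

```python
import numpy as np

def project_points(points_xyz, K, height, width):
    """Project Nx3 camera-frame points to integer pixel coords; drop out-of-view points."""
    z = points_xyz[:, 2]
    valid = z > 0                              # keep points in front of the camera
    uvw = K @ points_xyz[valid].T              # 3xN homogeneous pixel coordinates
    u = np.round(uvw[0] / uvw[2]).astype(int)
    v = np.round(uvw[1] / uvw[2]).astype(int)
    in_view = (u >= 0) & (u < width) & (v >= 0) & (v < height)
    return np.stack([v[in_view], u[in_view]], axis=1)  # (row, col) pairs

K = np.array([[500.0, 0, 320], [0, 500.0, 240], [0, 0, 1]])
pts = np.array([[0.1, -0.05, 1.0], [0.0, 0.0, 2.0], [0.0, 0.0, -1.0]])
print(project_points(pts, K, 480, 640))  # third point is behind the camera and dropped
```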
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-
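The unreachable lower half of fullPointcloudCallback uses the classic flood-fill trick for closing interior holes in a binary mask: flood the background from a corner, invert, and OR with the original. A standalone sketch:

```python
import cv2
import numpy as np

# A binary mask with an interior hole (e.g. a sparsely rendered robot silhouette).
mask = np.zeros((64, 64), np.uint8)
cv2.circle(mask, (32, 32), 20, 255, thickness=3)  # a ring, hollow inside

# Flood-fill the background from a corner, then invert: only enclosed holes remain.
flood = mask.copy()
ff_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), np.uint8)
cv2.floodFill(flood, ff_mask, (0, 0), 255)
holes = cv2.bitwise_not(flood)

filled = mask | holes  # original ring plus its interior -> solid disk
```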
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy is extrinsic x-y-z, i.e. R = Rz(yaw) @ Ry(pitch) @ Rx(roll), matching eulerToR below
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
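transformStampedToMatrix expands the quaternion into a rotation matrix by hand. The expansion can be cross-checked against SciPy, bearing in mind that SciPy expects (x, y, z, w) order while the code above reads w first:

```python
import numpy as np
from scipy.spatial.transform import Rotation as R

w, x, y, z = 0.9238795, 0.0, 0.0, 0.3826834  # 45 degrees about +z
r00 = 2 * (w * w + x * x) - 1
r01 = 2 * (x * y - w * z)
r02 = 2 * (x * z + w * y)
r10 = 2 * (x * y + w * z)
r11 = 2 * (w * w + y * y) - 1
r12 = 2 * (y * z - w * x)
r20 = 2 * (x * z - w * y)
r21 = 2 * (y * z + w * x)
r22 = 2 * (w * w + z * z) - 1
manual = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]])

assert np.allclose(manual, R.from_quat([x, y, z, w]).as_matrix(), atol=1e-6)
```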
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
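setupMeshes packs the sampled cloud as three float32 fields, so point_step is 3 × 4 = 12 bytes and data is just the raw array buffer. A sketch of the inverse operation, decoding such a payload back into an (N, 3) array (assuming the same tightly packed xyz-only layout):

```python
import numpy as np

def decode_xyz_cloud(data: bytes, width: int) -> np.ndarray:
    """Inverse of the packing above: raw float32 bytes -> (N, 3) points."""
    pts = np.frombuffer(data, dtype=np.float32)
    assert pts.size == width * 3, "expected tightly packed x, y, z floats"
    return pts.reshape(width, 3)

points = np.random.rand(10, 3).astype(np.float32)
payload = points.tobytes()
assert np.array_equal(decode_xyz_cloud(payload, 10), points)
```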
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
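eulerToR composes Rz @ Ry @ Rx, which is the URDF rpy convention (extrinsic x-y-z). A quick SciPy cross-check of that composition:

```python
import numpy as np
from scipy.spatial.transform import Rotation as R

roll, pitch, yaw = 0.1, -0.4, 0.7
cx, sx = np.cos(roll), np.sin(roll)
cy, sy = np.cos(pitch), np.sin(pitch)
cz, sz = np.cos(yaw), np.sin(yaw)
Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])

# Rz @ Ry @ Rx == extrinsic 'xyz' in SciPy's notation (lowercase = fixed axes).
assert np.allclose(Rz @ Ry @ Rx, R.from_euler('xyz', [roll, pitch, yaw]).as_matrix())
```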
- def replace_nan_with_neighbors_average(self,array):
- def replace_function(subarray):
- center = subarray[4]
- # NaN never compares equal to itself, so detect it with np.isnan
- if not np.isnan(center):
- return center
- valid_values = subarray[~np.isnan(subarray)]
- if valid_values.size == 0:
- return np.nan
- else:
- return np.nanmean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
-
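With the NaN test done via np.isnan, the filter above fills an isolated NaN with the mean of its valid neighbours. A tiny standalone demonstration using scipy.ndimage.generic_filter the same way:

```python
import numpy as np
from scipy.ndimage import generic_filter

def fill_nan(subarray):
    center = subarray[4]  # centre of the flattened 3x3 window
    if not np.isnan(center):
        return center
    return np.nan if np.all(np.isnan(subarray)) else np.nanmean(subarray)

a = np.array([[1.0, 2.0, 3.0],
              [4.0, np.nan, 6.0],
              [7.0, 8.0, 9.0]])
out = generic_filter(a, fill_nan, size=(3, 3), mode='constant', cval=np.nan)
print(out[1, 1])  # mean of the eight valid neighbours -> 5.0
```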
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
-
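listenerCallback and listenerCallbackData retarget the source gripper reading onto the Panda finger joint with a hardcoded affine map. Plugging sample readings through that map (coefficients copied from the code above; the clamp to the Panda's 0–0.04 m finger range and the sample readings are added assumptions):

```python
# Affine retarget from the source gripper reading to the Panda finger joint.
A = -0.05702400673569841
B = 0.02670973458948458

def panda_gripper_command(source_reading: float) -> float:
    # Panda finger joints travel 0.0 (closed) .. 0.04 m (open); clamp for safety.
    return max(0.0, min(0.04, A * source_reading + B))

for g in (0.0, 0.25, 0.5):  # illustrative source readings
    print(g, '->', round(panda_gripper_command(g), 4))
```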
- def listenerCallbackOnlineDebug(self,msg):
- msg = msg.data_pieces[0]
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- if(rgb_np.dtype == np.float32):
- self.float_image_ = True
- rgb_np = (rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- if not os.path.exists('original_rgb'):
- os.makedirs('original_rgb')
- cv2.imwrite('original_rgb/rgb' + str(self.i_) +'.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- panda_ur5_gripper_joints = np.array(msg.joints)
- panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
- ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz >= 0.01): # compare with >= to avoid exact float equality after repeated *10
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- self.q_init_ = qout
- panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
- panda_ur5_gripper_joint = 0.0
- panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
- panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
- full_joint_list = []
-
- full_joint_list.extend(panda_arm_joints)
- full_joint_list.extend(panda_ur5_arm_joints)
- qout_msg = Float64MultiArray()
- qout_msg.data = full_joint_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
- self.real_qout_list_ = full_joint_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
- # else:
- # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
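listenerCallbackOnlineDebug retries IK with exponentially loosened Cartesian bounds until TracIK returns a solution, then reuses the answer as the next seed. The pattern, abstracted into a helper (`solver` is a stand-in for a TracIKSolver, whose ik accepts bx/by/bz/brx/bry/brz bounds; the >= comparison replaces the exact float equality used above):

```python
import numpy as np

def ik_with_backoff(solver, ee_pose, q_seed,
                    b_xyz=1e-5, b_rpy=1e-3, max_xyz=0.01):
    """Retry IK with 10x looser bounds each round; fall back to the seed pose."""
    qout = solver.ik(ee_pose, qinit=q_seed)
    while qout is None:
        b_xyz *= 10
        b_rpy *= 10
        qout = solver.ik(ee_pose, qinit=q_seed,
                         bx=b_xyz, by=b_xyz, bz=b_xyz,
                         brx=b_rpy, bry=b_rpy, brz=b_rpy)
        if qout is None and b_xyz >= max_xyz:
            print("Couldn't find good IK, holding last configuration")
            return np.asarray(q_seed)
    return qout
```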
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
- msg.data.append(msg.data[-1]) # mirror the single gripper command so both finger joints receive it
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better_early_inpaint.py
similarity index 78%
rename from mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_real_better.py
rename to mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better_early_inpaint.py
index 81124ec..cd84cb5 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_gripper_to_panda_ur5_gripper_real_better.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better_early_inpaint.py
@@ -3,7 +3,6 @@
import rclpy
from rclpy.node import Node
import trimesh
-import open3d as o3d
from std_msgs.msg import Header, Float64MultiArray,Bool
from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
import numpy as np
@@ -23,12 +22,11 @@
import cv2
from cv_bridge import CvBridge
import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData
+from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
from sensor_msgs.msg import JointState
from tracikpy import TracIKSolver
from mdh.kinematic_chain import KinematicChain
from mdh import UnReachable
-import kinpy as kp
from geometry_msgs.msg import Vector3, Quaternion
from scipy.spatial.transform import Rotation as R
from message_filters import ApproximateTimeSynchronizer, Subscriber
@@ -46,7 +44,7 @@ def __init__(self):
self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
+ # self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
# real_camera_link to world and then multiply translation by 1000
@@ -80,7 +78,7 @@ def __init__(self):
self.listenerCallback,
1)
self.subscription_data_ = self.create_subscription(
- InputFilesRealData,
+ InputFilesRealDataMulti,
'input_files_data_real',
self.listenerCallbackOnlineDebug,
1)
@@ -89,28 +87,13 @@ def __init__(self):
# Hardcoding start position
self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
self.publishers_ = []
self.subscribers_ = []
self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
+ self.left_real_rgb_ = None
+ self.right_real_rgb_ = None
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
self.real_seg_ = None
self.real_qout_list_ = None
self.i_ = -1
@@ -135,35 +118,99 @@ def __init__(self):
1
)
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
+ self.ur5_left_rgb_ = None
+ self.ur5_left_depth_ = None
+ self.panda_left_rgb_ = None
+ self.panda_left_depth_ = None
+ self.ur5_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/image_raw',
+ self.ur5LeftRgbCallback,
+ 1
+ )
+
+ self.ur5_left_camera_depth_subscription_ = self.create_subscription(
Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
+ '/ur5_left_camera/depth/image_raw',
+ self.ur5LeftDepthCallback,
1
)
- self.ur5_camera_depth_subscription_ = self.create_subscription(
+ self.panda_left_camera_color_subscription_ = self.create_subscription(
Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
+ '/panda_left_camera/image_raw',
+ self.pandaLeftRgbCallback,
1
)
- self.panda_camera_color_subscription_ = self.create_subscription(
+ self.panda_left_camera_depth_subscription_ = self.create_subscription(
Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
+ '/panda_left_camera/depth/image_raw',
+ self.pandaLeftDepthCallback,
1
)
- self.panda_camera_depth_subscription_ = self.create_subscription(
+ self.ur5_right_rgb_ = None
+ self.ur5_right_depth_ = None
+ self.panda_right_rgb_ = None
+ self.panda_right_depth_ = None
+ self.ur5_right_camera_color_subscription_ = self.create_subscription(
Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
+ '/ur5_right_camera/image_raw',
+ self.ur5RightRgbCallback,
+ 1
+ )
+
+ self.ur5_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/depth/image_raw',
+ self.ur5RightDepthCallback,
+ 1
+ )
+
+ self.panda_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/image_raw',
+ self.pandaRightRgbCallback,
+ 1
+ )
+
+ self.panda_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/depth/image_raw',
+ self.pandaRightDepthCallback,
+ 1
+ )
+
+ self.panda_left_no_gripper_rgb_ = None
+ self.panda_left_no_gripper_depth_ = None
+ self.panda_right_no_gripper_rgb_ = None
+ self.panda_right_no_gripper_depth_ = None
+ self.panda_no_gripper_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/image_raw',
+ self.pandaLeftNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/depth/image_raw',
+ self.pandaLeftNoGripperDepthCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/image_raw',
+ self.pandaRightNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/depth/image_raw',
+ self.pandaRightNoGripperDepthCallback,
1
)
@@ -179,58 +226,79 @@ def __init__(self):
timer_period = 0.5
self.links_info_ = []
self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
+ self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
#self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
self.updated_joints_ = False
self.is_ready_ = True
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
+ def ur5LeftRgbCallback(self,msg):
+ self.ur5_left_rgb_ = msg
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
+ def ur5LeftDepthCallback(self,msg):
+ self.ur5_left_depth_ = msg
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
+ def pandaLeftRgbCallback(self,msg):
+ self.panda_left_rgb_ = msg
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
+ def pandaLeftDepthCallback(self,msg):
+ self.panda_left_depth_ = msg
+
+ def ur5RightRgbCallback(self,msg):
+ self.ur5_right_rgb_ = msg
+
+ def ur5RightDepthCallback(self,msg):
+ self.ur5_right_depth_ = msg
+
+ def pandaRightRgbCallback(self,msg):
+ self.panda_right_rgb_ = msg
+
+ def pandaRightDepthCallback(self,msg):
+ self.panda_right_depth_ = msg
+
+ def pandaLeftNoGripperRgbCallback(self,msg):
+ self.panda_left_no_gripper_rgb_ = msg
+
+ def pandaLeftNoGripperDepthCallback(self,msg):
+ self.panda_left_no_gripper_depth_ = msg
+
+ def pandaRightNoGripperRgbCallback(self,msg):
+ self.panda_right_no_gripper_rgb_ = msg
+
+ def pandaRightNoGripperDepthCallback(self,msg):
+ self.panda_right_no_gripper_depth_ = msg
def noTimeGazeboCallback(self,joint_msg):
start_time = time.time()
- real_rgb = self.panda_rgb_
- real_depth = self.panda_depth_
- gazebo_rgb = self.ur5_rgb_
- gazebo_depth = self.ur5_depth_
+ left_inpainted_image,left_mask = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_,self.panda_left_no_gripper_depth_,self.left_real_rgb_,self.left_real_depth_)
+ right_inpainted_image,right_mask = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_,self.panda_right_no_gripper_depth_,self.right_real_rgb_,self.right_real_depth_)
+ inpainted_image_msg = MultipleInpaintImages()
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(left_mask),self.cv_bridge_.cv2_to_imgmsg(right_mask)]
+ mask_image_msg.images = []
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('left_inpainting'):
+ os.makedirs('left_inpainting')
+ if not os.path.exists('right_inpainting'):
+ os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
+ cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
+ cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
+
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,gazebo_no_gripper_depth,world_rgb,world_depth):
real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
@@ -240,6 +308,7 @@ def noTimeGazeboCallback(self,joint_msg):
cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+ gazebo_no_gripper_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_no_gripper_depth)
real_seg_np = (real_depth_np < 8).astype(np.uint8)
real_seg_255_np = 255 * real_seg_np
@@ -247,16 +316,69 @@ def noTimeGazeboCallback(self,joint_msg):
gazebo_seg_255_np = 255 * gazebo_seg_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
+ real_no_gripper_seg_255_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
+ real_no_gripper_seg_255_np = 255 * real_no_gripper_seg_255_np
+ real_rgb = world_rgb
+ real_depth = world_depth
real_seg = real_seg_255_np
gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
+ return self.dummyInpaintingEarly(real_rgb,real_seg,real_no_gripper_seg_255_np,gazebo_rgb_np,gazebo_seg_255_np)
+
+ def dummyInpaintingEarly(self,rgb,seg_file,seg_file_no_gripper,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file_no_gripper = cv2.threshold(seg_file_no_gripper, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('seg_file.png',seg_file)
+ cv2.imwrite('seg_file_no_gripper.png',seg_file_no_gripper)
+ seg_file_gripper = abs(seg_file - seg_file_no_gripper)
+ seg_file_gripper = cv2.erode(seg_file_gripper,np.ones((3,3),np.uint8),iterations=3)
+ seg_file_gripper = cv2.dilate(seg_file_gripper,np.ones((3,3),np.uint8),iterations=15)
+ cv2.imwrite('seg_file_gripper.png',seg_file_gripper)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file_no_gripper)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=30)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file_gripper
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre2_thresholded.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
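dummyInpaintingEarly isolates the gripper by differencing the with-gripper and no-gripper segmentation renders, eroding away one-pixel slivers, then dilating a generous keep-out region. A standalone morphology sketch with synthetic masks (`cv2.absdiff` stands in for the `abs(a - b)` subtraction above):

```python
import cv2
import numpy as np

full = np.zeros((64, 64), np.uint8)
no_gripper = np.zeros((64, 64), np.uint8)
cv2.rectangle(full, (10, 10), (50, 50), 255, -1)        # arm + gripper
cv2.rectangle(no_gripper, (10, 10), (50, 38), 255, -1)  # arm only

# The difference leaves just the gripper region (plus any rendering noise).
gripper = cv2.absdiff(full, no_gripper)
kernel = np.ones((3, 3), np.uint8)
gripper = cv2.erode(gripper, kernel, iterations=3)    # drop thin spurious slivers
gripper = cv2.dilate(gripper, kernel, iterations=15)  # pad the keep-out region
```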
def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
return
@@ -510,13 +632,18 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
#cv2.imwrite('no_fill.png',inpainted_image)
better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
- # cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
+ cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
+ cv2.imwrite('pre_inpainted_image.png',inpainted_image)
+ cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
target_color = (39,43,44)
target_mask = np.zeros_like(inpainted_image)
target_mask[:,:] = target_color
inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- # inpainted_image = cv2_inpaint_image
+ inpainted_image = cv2_inpaint_image # the TELEA result supersedes the flat-color fill above
+ cv2.imwrite('og_inpainted_image.png',inpainted_image)
inpaint_number = str(self.i_).zfill(5)
if not os.path.exists('inpainting'):
os.makedirs('inpainting')
@@ -530,7 +657,9 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
- self.inpainted_publisher_.publish(inpainted_image_msg)
+ inpainted_image_msgs = MultipleInpaintImages()
+ inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
+ self.inpainted_publisher_.publish(inpainted_image_msgs)
self.mask_image_publisher_.publish(mask_image_msg)
# better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
@@ -1019,68 +1148,88 @@ def listenerCallback(self,msg):
end_time = time.time()
def listenerCallbackOnlineDebug(self,msg):
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
+ if(len(msg.data_pieces) == 2):
+ start_time = time.time()
+ self.i_ += 1
+ online_input_folder = 'offline_ur5e_input'
+ if not os.path.exists(online_input_folder):
+ os.makedirs(online_input_folder)
+ online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
+ if not os.path.exists(online_input_num_folder):
+ os.makedirs(online_input_num_folder)
+ left_msg = msg.data_pieces[0]
+ right_msg = msg.data_pieces[1]
+ elif(len(msg.data_pieces) == 1):
+ left_msg = msg.data_pieces[0]
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ if(left_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- if(rgb_np.dtype == np.float32):
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
+ left_depth_np[np.isnan(left_depth_np)] = 0
+ if(left_rgb_np.dtype == np.float32):
self.float_image_ = True
- rgb_np = (rgb_np * 255).astype(np.uint8)
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
+
+ right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
# rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- if not os.path.exists('original_rgb'):
- os.makedirs('original_rgb')
- cv2.imwrite('original_rgb/rgb' + str(self.i_) +'.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- panda_panda_gripper_joints = np.array(msg.joints)
- panda_panda_gripper_joints = panda_panda_gripper_joints[:-1]
- ee_pose = self.panda_panda_gripper_solver_.fk(panda_panda_gripper_joints)
+ right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
+ right_depth_np[np.isnan(right_depth_np)] = 0
+ if(right_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
+
+ panda_ur5_gripper_joints = np.array(msg.data_pieces[0].joints)
+ real_ee_gripper_joint = panda_ur5_gripper_joints[-1]
+ panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
+ ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
ee_pose = ee_pose @ end_effector_rotation_with_no_translation
scipy_rotation = R.from_matrix(ee_pose[:3,:3])
scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
b_xyz = 1e-5
b_rpy = 1e-3
while(qout is None):
b_xyz *= 10
b_rpy *= 10
- qout = self.panda_ur5_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
if(b_xyz >= 0.01): # compare with >= to avoid exact float equality after repeated *10
print("Couldn't find good IK")
qout = self.q_init_
print("Bound xyz: " + str(b_xyz))
self.q_init_ = qout
- panda_arm_joints = panda_panda_gripper_joints.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
- panda_ur5_arm_joints = qout.tolist()
- panda_ur5_gripper_joint = 0.0
+ panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
+ panda_ur5_gripper_joint = 0.8 * real_ee_gripper_joint
panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
+ panda_arm_joints = qout.tolist()
+ panda_gripper_joint = -0.04 * real_ee_gripper_joint + 0.04
+ panda_gripper_joints = [panda_gripper_joint] * 2
full_joint_list = []
-
+
full_joint_list.extend(panda_arm_joints)
full_joint_list.extend(panda_ur5_arm_joints)
+ full_joint_list.extend(panda_gripper_joints)
+ full_joint_list.extend(panda_ur5_gripper_joints)
qout_msg = Float64MultiArray()
qout_msg.data = full_joint_list
self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
+ self.left_real_rgb_ = left_msg.rgb
+ self.right_real_rgb_ = right_msg.rgb
+
+ self.left_real_depth_ = left_depth_np
+ self.right_real_depth_ = right_depth_np
self.real_qout_list_ = full_joint_list
end_time = time.time()
print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
+
# else:
# qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
# print("WARNING HIT IK ON FRANKA ERROR")
@@ -1139,7 +1288,7 @@ def joint_commands_callback(self, msg):
# msg.data.append(gripper_val)
self.joint_state_msg.position = msg.data
self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
+ # self.fks_ = self.chain_.forward_kinematics(self.thetas_)
self.panda_joint_state_publisher_.publish(self.joint_state_msg)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better_wrist_early_inpaint.py
similarity index 75%
rename from mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_real_better.py
rename to mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better_wrist_early_inpaint.py
index f1e987e..2096fb7 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_real_better.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_multi_real_better_wrist_early_inpaint.py
@@ -3,7 +3,6 @@
import rclpy
from rclpy.node import Node
import trimesh
-import open3d as o3d
from std_msgs.msg import Header, Float64MultiArray,Bool
from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
import numpy as np
@@ -23,12 +22,11 @@
import cv2
from cv_bridge import CvBridge
import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData
+from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
from sensor_msgs.msg import JointState
from tracikpy import TracIKSolver
from mdh.kinematic_chain import KinematicChain
from mdh import UnReachable
-import kinpy as kp
from geometry_msgs.msg import Vector3, Quaternion
from scipy.spatial.transform import Rotation as R
from message_filters import ApproximateTimeSynchronizer, Subscriber
@@ -43,11 +41,11 @@ def __init__(self):
self.debug_ = True
self.float_image_ = False
file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"panda_link0","panda_ee")
- self.ur5_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/ur5_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
- self.ur5_solver_ = TracIKSolver(self.ur5_urdf_,"base_link","ur5_ee_gripper")
+ self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
+ self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
+ self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
+ # self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
+ self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
# real_camera_link to world and then multiply translation by 1000
# self.camera_to_world_ = np.array([[0,1,0,0],
@@ -80,7 +78,7 @@ def __init__(self):
self.listenerCallback,
1)
self.subscription_data_ = self.create_subscription(
- InputFilesRealData,
+ InputFilesRealDataMulti,
'input_files_data_real',
self.listenerCallbackOnlineDebug,
1)
@@ -89,28 +87,13 @@ def __init__(self):
#Hardcoding start position
self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
self.publishers_ = []
self.subscribers_ = []
self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
+ self.left_real_rgb_ = None
+ self.right_real_rgb_ = None
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
self.real_seg_ = None
self.real_qout_list_ = None
self.i_ = -1
@@ -135,35 +118,99 @@ def __init__(self):
1
)
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
+ self.ur5_left_rgb_ = None
+ self.ur5_left_depth_ = None
+ self.panda_left_rgb_ = None
+ self.panda_left_depth_ = None
+ self.ur5_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/image_raw',
+ self.ur5LeftRgbCallback,
+ 1
+ )
+
+ self.ur5_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/depth/image_raw',
+ self.ur5LeftDepthCallback,
+ 1
+ )
+
+ self.panda_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_left_camera/image_raw',
+ self.pandaLeftRgbCallback,
+ 1
+ )
+
+ self.panda_left_camera_depth_subscription_ = self.create_subscription(
Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
+ '/panda_left_camera/depth/image_raw',
+ self.pandaLeftDepthCallback,
1
)
- self.ur5_camera_depth_subscription_ = self.create_subscription(
+ self.ur5_right_rgb_ = None
+ self.ur5_right_depth_ = None
+ self.panda_right_rgb_ = None
+ self.panda_right_depth_ = None
+ self.ur5_right_camera_color_subscription_ = self.create_subscription(
Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
+ '/ur5_right_camera/image_raw',
+ self.ur5RightRgbCallback,
1
)
- self.panda_camera_color_subscription_ = self.create_subscription(
+ self.ur5_right_camera_depth_subscription_ = self.create_subscription(
Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
+ '/ur5_right_camera/depth/image_raw',
+ self.ur5RightDepthCallback,
1
)
- self.panda_camera_depth_subscription_ = self.create_subscription(
+ self.panda_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/image_raw',
+ self.pandaRightRgbCallback,
+ 1
+ )
+
+ self.panda_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/depth/image_raw',
+ self.pandaRightDepthCallback,
+ 1
+ )
+
+ self.panda_left_no_gripper_rgb_ = None
+ self.panda_left_no_gripper_depth_ = None
+ self.panda_right_no_gripper_rgb_ = None
+ self.panda_right_no_gripper_depth_ = None
+ self.panda_no_gripper_left_camera_color_subscription_ = self.create_subscription(
Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
+ '/panda_no_gripper_left_camera/image_raw',
+ self.pandaLeftNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_left_camera/depth/image_raw',
+ self.pandaLeftNoGripperDepthCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/image_raw',
+ self.pandaRightNoGripperRgbCallback,
+ 1
+ )
+
+ self.panda_no_gripper_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_no_gripper_right_camera/depth/image_raw',
+ self.pandaRightNoGripperDepthCallback,
1
)
@@ -179,58 +226,81 @@ def __init__(self):
timer_period = 0.5
self.links_info_ = []
self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
+ self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
#self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
self.updated_joints_ = False
self.is_ready_ = True
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
+ def ur5LeftRgbCallback(self,msg):
+ self.ur5_left_rgb_ = msg
+
+ def ur5LeftDepthCallback(self,msg):
+ self.ur5_left_depth_ = msg
+
+ def pandaLeftRgbCallback(self,msg):
+ self.panda_left_rgb_ = msg
+
+ def pandaLeftDepthCallback(self,msg):
+ self.panda_left_depth_ = msg
+
+ def ur5RightRgbCallback(self,msg):
+ self.ur5_right_rgb_ = msg
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
+ def ur5RightDepthCallback(self,msg):
+ self.ur5_right_depth_ = msg
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
+ def pandaRightRgbCallback(self,msg):
+ self.panda_right_rgb_ = msg
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
+ def pandaRightDepthCallback(self,msg):
+ self.panda_right_depth_ = msg
+
+ def pandaLeftNoGripperRgbCallback(self,msg):
+ self.panda_left_no_gripper_rgb_ = msg
+
+ def pandaLeftNoGripperDepthCallback(self,msg):
+ self.panda_left_no_gripper_depth_ = msg
+
+ def pandaRightNoGripperRgbCallback(self,msg):
+ self.panda_right_no_gripper_rgb_ = msg
+
+ def pandaRightNoGripperDepthCallback(self,msg):
+ self.panda_right_no_gripper_depth_ = msg
def noTimeGazeboCallback(self,joint_msg):
start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
+ left_inpainted_image,left_mask = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_,self.panda_left_no_gripper_depth_,self.left_real_rgb_,self.left_real_depth_,is_wrist=True)
+ right_inpainted_image,right_mask = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_,self.panda_right_no_gripper_depth_,self.right_real_rgb_,self.right_real_depth_,is_wrist=False)
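+ # Each view is cross-painted independently: the left (wrist) pass keeps the whole
+ # robot mask (is_wrist=True), while the right (side) pass uses the extra bare-arm
+ # render to treat arm and gripper masks separately. The left camera frame was
+ # flipped vertically in listenerCallbackOnlineDebug, so the composite and its
+ # mask are flipped back before publishing.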
+ left_inpainted_image = cv2.flip(left_inpainted_image,0)
+ left_mask = cv2.flip(left_mask,0)
+ inpainted_image_msg = MultipleInpaintImages()
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(left_mask),self.cv_bridge_.cv2_to_imgmsg(right_mask)]
+ mask_image_msg.images = []
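+ # A single MultipleInpaintImages message carries all four frames, ordered
+ # [left composite, right composite, left mask, right mask]; full_mask_image is
+ # published with an empty image list.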
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('left_inpainting'):
+ os.makedirs('left_inpainting')
+ if not os.path.exists('right_inpainting'):
+ os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
+ cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
+ cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
+
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,gazebo_no_gripper_depth,world_rgb,world_depth,is_wrist=True):
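+ # real_rgb/real_depth: Gazebo render of the source robot (Panda + UR5 gripper);
+ # gazebo_rgb/gazebo_depth: render of the target Panda; gazebo_no_gripper_depth:
+ # render of the bare Panda arm used to split arm and gripper masks;
+ # world_rgb/world_depth: the real camera frames cached by listenerCallbackOnlineDebug.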
real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
@@ -240,6 +310,7 @@ def noTimeGazeboCallback(self,joint_msg):
cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+ gazebo_no_gripper_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_no_gripper_depth)
real_seg_np = (real_depth_np < 8).astype(np.uint8)
real_seg_255_np = 255 * real_seg_np
@@ -247,16 +318,94 @@ def noTimeGazeboCallback(self,joint_msg):
gazebo_seg_255_np = 255 * gazebo_seg_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
+ real_no_gripper_seg_255_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
+ real_no_gripper_seg_255_np = 255 * real_no_gripper_seg_255_np
+ real_rgb = world_rgb
+ real_depth = world_depth
real_seg = real_seg_255_np
gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
+ return self.dummyInpaintingEarly(real_rgb,real_seg,real_no_gripper_seg_255_np,gazebo_rgb_np,gazebo_seg_255_np,is_wrist)
+
+ def dummyInpaintingEarly(self,rgb,seg_file,seg_file_no_gripper,gazebo_rgb,gazebo_seg,is_wrist=True):
+
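+ # Sketch of the early-inpainting pass: threshold both robot masks, erode the
+ # inverted source mask to leave a safety margin, Telea-inpaint the background
+ # where the source robot was, then composite the target render on top; returns
+ # the composite and a copy with the blend seam blacked out (diffusion_input).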
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ if is_wrist:
+ cv2.imwrite('seg_file.png',seg_file)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=30)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255  # no-op kept for symmetry with the else branch, which adds the gripper mask here
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ else:
+ _, seg_file_no_gripper = cv2.threshold(seg_file_no_gripper, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('seg_file.png',seg_file)
+ cv2.imwrite('seg_file_no_gripper.png',seg_file_no_gripper)
+ seg_file_gripper = abs(seg_file - seg_file_no_gripper)  # gripper-only mask: pixels in the full seg but not in the bare-arm seg
+ seg_file_gripper = cv2.erode(seg_file_gripper,np.ones((3,3),np.uint8),iterations=3)
+ seg_file_gripper = cv2.dilate(seg_file_gripper,np.ones((3,3),np.uint8),iterations=15)
+ cv2.imwrite('seg_file_gripper.png',seg_file_gripper)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file_no_gripper)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=30)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file_gripper  # assumes the arm and gripper masks stay disjoint; uint8 addition wraps on overlap
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
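+ # For the wrist view the target render and its mask are shifted up by
+ # pixel_shift rows, padding the bottom with black; presumably this compensates
+ # a fixed vertical offset between the simulated and real wrist camera views.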
+ if(is_wrist):
+ pixel_shift = 50
+ chopped = gazebo_only[pixel_shift:,:]
+ black_rows = np.zeros((pixel_shift, gazebo_only.shape[1], 3), dtype=np.uint8)
+ gazebo_only = np.vstack((chopped,black_rows))
+
+ chopped_mask = gazebo_segmentation_mask_255[pixel_shift:,:]
+ black_rows_mask = np.zeros((pixel_shift, gazebo_segmentation_mask_255.shape[1]), dtype=np.uint8)
+ gazebo_segmentation_mask_255 = np.vstack((chopped_mask,black_rows_mask))
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
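+ # better_dilated_blend_mask marks pixels inside the eroded source-robot mask
+ # that the target render leaves uncovered; diffusion_input zeroes them so the
+ # seam can be filled by a later (e.g. diffusion-based) inpainting step.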
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
return
@@ -479,6 +628,8 @@ def convertPointcloudToDepth(self,pointcloud):
def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
# rgb = cv2.resize(rgb,(128,128))
# seg_file = cv2.resize(seg_file,(128,128))
# gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
@@ -503,18 +654,21 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
#cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
- #cv2.imwrite('background_only.png',background_only)
+ cv2.imwrite('background_only.png',background_only)
inpainted_image = gazebo_only + background_only
#cv2.imwrite('no_fill.png',inpainted_image)
better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
+ cv2.imwrite('pre_inpainted_image.png',inpainted_image)
cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
target_color = (39,43,44)
target_mask = np.zeros_like(inpainted_image)
target_mask[:,:] = target_color
inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- # inpainted_image = cv2_inpaint_image
+ inpainted_image = cv2_inpaint_image
+ cv2.imwrite('og_inpainted_image.png',inpainted_image)
inpaint_number = str(self.i_).zfill(5)
if not os.path.exists('inpainting'):
os.makedirs('inpainting')
@@ -523,11 +677,14 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
if(self.float_image_):
- import pdb
- pdb.set_trace()
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image,encoding="bgr8")
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
+ inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
+ mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
+ inpainted_image_msgs = MultipleInpaintImages()
+ inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
+ self.inpainted_publisher_.publish(inpainted_image_msgs)
self.mask_image_publisher_.publish(mask_image_msg)
# better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
@@ -1002,7 +1159,7 @@ def listenerCallback(self,msg):
ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
scipy_rotation = R.from_matrix(ee_pose[:3,:3])
scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
self.q_init_ = qout
# Hardcoded gripper
qout_list = qout.tolist()
@@ -1016,71 +1173,79 @@ def listenerCallback(self,msg):
end_time = time.time()
def listenerCallbackOnlineDebug(self,msg):
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- if(rgb_np.dtype == np.float32):
+ if(len(msg.data_pieces) == 2):
+ start_time = time.time()
+ self.i_ += 1
+ online_input_folder = 'offline_ur5e_input'
+ if not os.path.exists(online_input_folder):
+ os.makedirs(online_input_folder)
+ online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
+ if not os.path.exists(online_input_num_folder):
+ os.makedirs(online_input_num_folder)
+ left_msg = msg.data_pieces[0]
+ right_msg = msg.data_pieces[1]
+ elif(len(msg.data_pieces) == 1):
+ start_time = time.time()
+ left_msg = msg.data_pieces[0]
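+ # Note: right_msg is bound only in the two-piece branch above, yet the code
+ # below always reads right_msg.rgb, so single-piece input is not actually
+ # handled end to end here.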
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ if(left_rgb_np.dtype == np.float32):
self.float_image_ = True
- rgb_np = (rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- cv2.imwrite(online_input_num_folder+'/rgb.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- ur5_joints = np.array(msg.joints)
- ur5_joints[0] += math.pi
- ee_pose = self.ur5_solver_.fk(ur5_joints)
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
+ left_rgb_np = cv2.flip(left_rgb_np,0)
+
+ right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
+
+ if(right_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
+
+ panda_ur5_gripper_joints = np.array(left_msg.joints)
+ real_ee_gripper_joint = panda_ur5_gripper_joints[-1]
+ panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
+
+ ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
ee_pose = ee_pose @ end_effector_rotation_with_no_translation
scipy_rotation = R.from_matrix(ee_pose[:3,:3])
scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
b_xyz = 1e-5
b_rpy = 1e-3
while(qout is None):
b_xyz *= 10
b_rpy *= 10
- qout = self.ur5_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
if(b_xyz == 0.01):
print("Couldn't find good IK")
qout = self.q_init_
print("Bound xyz: " + str(b_xyz))
self.q_init_ = qout
- ur5_arm_joints = ur5_joints.tolist()
- ur5_gripper_joint = 0.0
- ur5_gripper_joints = [ur5_gripper_joint] * 6
+ panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
+ panda_ur5_gripper_joint = 0.8 * real_ee_gripper_joint
+ panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
+ panda_gripper_joint = -0.04 * real_ee_gripper_joint + 0.04
panda_gripper_joints = [panda_gripper_joint] * 2
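+ # Linear gripper retargeting: the six UR5 gripper (mimic) joints are scaled by
+ # 0.8, and the Panda finger width is mapped affinely so that a reading of 0.0
+ # gives 0.04 m and 1.0 gives 0.0 m, assuming 0 means open on the source gripper.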
full_joint_list = []
-
- full_joint_list.extend(ur5_arm_joints)
- full_joint_list.extend(ur5_gripper_joints)
+
full_joint_list.extend(panda_arm_joints)
+ full_joint_list.extend(panda_ur5_arm_joints)
full_joint_list.extend(panda_gripper_joints)
-
+ full_joint_list.extend(panda_ur5_gripper_joints)
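+ # Command vector layout for the Gazebo joint bridge: target Panda arm joints,
+ # then source arm joints, then the 2 Panda finger joints, then the 6 UR5
+ # gripper joints.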
qout_msg = Float64MultiArray()
qout_msg.data = full_joint_list
self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
+ self.left_real_rgb_ = self.cv_bridge_.cv2_to_imgmsg(left_rgb_np)
+ self.right_real_rgb_ = self.cv_bridge_.cv2_to_imgmsg(right_rgb_np)
+
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
self.real_qout_list_ = full_joint_list
end_time = time.time()
print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
+
# else:
- # qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
+ # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
# print("WARNING HIT IK ON FRANKA ERROR")
@@ -1107,7 +1272,7 @@ def listenerCallbackData(self,msg):
joint_array = joint_array[:-1]
ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
self.q_init_ = qout
# Hardcoded gripper
@@ -1137,7 +1302,7 @@ def joint_commands_callback(self, msg):
# msg.data.append(gripper_val)
self.joint_state_msg.position = msg.data
self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
+ # self.fks_ = self.chain_.forward_kinematics(self.thetas_)
self.panda_joint_state_publisher_.publish(self.joint_state_msg)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_real_better.py
deleted file mode 100644
index 68e9318..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_real_better.py
+++ /dev/null
@@ -1,3511 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-import pathlib
-
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = True
- self.float_image_ = False
- file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
- self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
- self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
- self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
- self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
- self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- '/input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRealDataMulti,
- 'input_files_data_real',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- #Hardcoding start position
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
- self.real_seg_ = None
- self.real_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # REMEMBER TO HARDCODE THIS
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
-
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
- 1
- )
-
- self.ur5_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
- 1
- )
-
- self.panda_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
- 1
- )
-
- self.panda_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
- 1
- )
-
- self.joint_subscription_ = self.create_subscription(
- JointState,
- '/gazebo_joint_states',
- self.noTimeGazeboCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
-
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
-
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
-
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
-
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
-
- def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
- return
- start_time = time.time()
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # RPY is in ZYX I'm pretty sure
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[1],self.image_shape_[0])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogenous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogenous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
- rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
- if(rgb.dtype == np.float32):
- rgb = (rgb * 255).astype(np.uint8)
- # rgb = cv2.resize(rgb,(128,128))
- # seg_file = cv2.resize(seg_file,(128,128))
- # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
- gazebo_segmentation_mask_255 = gazebo_seg
- inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
- cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
- outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
- # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
- #gazebo_robot_only_lab[:,:,0] += 10
- #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
- #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- cv2.imwrite('gazebo_robot_only.png',gazebo_only)
- background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
- inverted_seg_file_original = cv2.bitwise_not(seg_file)
- #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
- inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
- #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
- background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
- cv2.imwrite('background_only.png',background_only)
- inpainted_image = gazebo_only + background_only
- #cv2.imwrite('no_fill.png',inpainted_image)
-
- better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
- cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
- cv2.imwrite('pre_inpainted_image.png',inpainted_image)
- cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
-
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- inpainted_image = cv2_inpaint_image
- cv2.imwrite('og_inpainted_image.png',inpainted_image)
- import pdb
- pdb.set_trace()
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
- if(self.float_image_):
- inpainted_image = (inpainted_image / 255.0).astype(np.float32)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
- inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
- mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
- inpainted_image_msgs = MultipleInpaintImages()
- inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
- self.inpainted_publisher_.publish(inpainted_image_msgs)
- self.mask_image_publisher_.publish(mask_image_msg)
-
-# better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
-# better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
-# target_color = (39,43,44)
-# target_mask = np.zeros_like(inpainted_image)
-# target_mask[:,:] = target_color
-# got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
-# #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
-# image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
-# image_8bit = got_away
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- return
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- depth_np = depth
- seg = seg_file
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- #rgb_np = cv2.resize(rgb_np,(128,128))
- #depth_np = cv2.resize(depth_np,(128,128))
- #seg = cv2.resize(seg,(128,128))
- #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
- real_depth_image_unmasked = depth_np
- real_rgb_image_unmasked = rgb_np
- real_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
- inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
- outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
-
- # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- real_rgb_image_masked_inpaint = real_rgb_image_masked
-
-
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 100
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
-
- inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
-
- better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
- better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- image_8bit = got_away
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
- # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- self.mask_image_publisher_.publish(mask_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
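- # transformGazeboImage reprojects the Gazebo robot pointcloud (already in the camera frame)
- # into the robosuite camera: each 3D point is projected through both intrinsic matrices, the
- # RGB values of all Gazebo pixels landing on a robosuite pixel are averaged, and the holes left
- # by the sparse reprojection are closed with dilate/erode plus TELEA inpainting.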
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) # unfilled pixels stay 0 and are remapped to a far depth (1000) downstream
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- kernel_3x3 = np.ones((3,3),np.uint8) # cv2.dilate/erode expect an ndarray kernel; a bare (3,3) tuple is not a 3x3 structuring element
- gazebo_depth_after = cv2.dilate(gazebo_depth,kernel_3x3,iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,kernel_3x3,iterations=3)
- transformed_gazebo_seg_after = cv2.dilate(transformed_gazebo_seg,kernel_3x3,iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,kernel_3x3,iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0).astype(np.uint8) # cv2.bitwise_and requires uint8; np.where defaults to int64
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy: fixed-axis roll, then pitch, then yaw (composed as Rz @ Ry @ Rx in eulerToR)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # NOTE: unreachable (the method returns above); this TF-based variant requires the camera frame to be present in the Gazebo scene
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
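- # transformStampedToMatrix builds a homogeneous transform from a unit quaternion (w,x,y,z)
- # using the standard rotation-matrix expansion, e.g. r00 = 2*(w^2 + x^2) - 1, and scales the
- # translation from meters to millimeters to match the mesh units used above.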
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
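- # setupMeshes concatenates the per-link meshes at the current FK pose, samples them into one
- # pointcloud, converts millimeters back to meters, and publishes it as an unorganized
- # PointCloud2 (height 1, three float32 fields, point_step = 3 * 4 bytes) before handing it to
- # fullPointcloudCallback.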
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
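- # eulerToR composes R = Rz @ Ry @ Rx from the URDF roll/pitch/yaw values, i.e. extrinsic
- # x-y-z (equivalently intrinsic ZYX) rotations, matching the URDF rpy convention.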
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
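- # replace_nan_with_neighbors_average slides a 3x3 window over the array (out-of-bounds cells
- # are padded with NaN via cval); subarray[4] is the window center, which is kept unless it is
- # NaN, in which case the mean of the valid neighbors is used.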
- def replace_nan_with_neighbors_average(self,array):
- def replace_function(subarray):
- center = subarray[4]
- if(not np.isnan(center)): # `center != np.nan` is always True; NaN must be tested with np.isnan
- return center
- valid_values = subarray[~np.isnan(subarray)] # keep only non-NaN neighbors (the padding cval is NaN)
- if valid_values.size == 0:
- return np.nan
- else:
- return np.mean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
- # Hardcoded linear map from the UR5e gripper command to the Panda finger joint position
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
-
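- # listenerCallbackOnlineDebug retargets a UR5 observation onto the Panda: clean the depth map,
- # run FK on the UR5 joints to get the end-effector pose, then solve Panda IK with TRAC-IK,
- # progressively relaxing the position/orientation tolerances (b_xyz, b_rpy) by 10x until a
- # solution is found or the bounds hit 0.01, at which point the previous solution is reused.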
- def listenerCallbackOnlineDebug(self,msg):
- msg = msg.data_pieces[0]
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- if(rgb_np.dtype == np.float32):
- self.float_image_ = True
- rgb_np = (rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- if not os.path.exists('original_rgb'):
- os.makedirs('original_rgb')
- cv2.imwrite('original_rgb/rgb' + str(self.i_) +'.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- panda_ur5_gripper_joints = np.array(msg.joints)
- panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
- ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz == 0.01):
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- self.q_init_ = qout
- panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
- panda_ur5_gripper_joint = 0.0
- panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
- panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
- full_joint_list = []
-
- full_joint_list.extend(panda_arm_joints)
- full_joint_list.extend(panda_ur5_arm_joints)
- qout_msg = Float64MultiArray()
- qout_msg.data = full_joint_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
- self.real_qout_list_ = full_joint_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
- # else:
- # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded linear map from the UR5e gripper command to the Panda finger joint position
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
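- # joint_commands_callback republishes the 8-element command (7 arm joints + 1 finger value) as
- # a 9-joint JointState by duplicating the last entry for panda_finger_joint2, then updates the
- # cached forward kinematics (self.fks_) used by setupMesh.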
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
- msg.data.append(msg.data[-1])
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
-
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-import pathlib
-
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = True
- self.float_image_ = False
- file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
- self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
- self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
- self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
- self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
- self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- '/input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRealDataMulti,
- 'input_files_data_real',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- # Hardcoding start position
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
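- # Expand the xacro into a URDF string by shelling out to `ros2 run xacro xacro` and reading
- # the subprocess stdout line by line; the XML is parsed below to collect every link that has a
- # visual mesh.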
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
- self.real_seg_ = None
- self.real_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # REMEMBER TO HARDCODE THIS
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
-
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
- 1
- )
-
- self.ur5_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
- 1
- )
-
- self.panda_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
- 1
- )
-
- self.panda_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
- 1
- )
-
- self.joint_subscription_ = self.create_subscription(
- JointState,
- '/gazebo_joint_states',
- self.noTimeGazeboCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
-
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
-
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
-
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
-
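- # noTimeGazeboCallback consumes the latest cached UR5 and Panda Gazebo renders. Each render
- # shows only a robot, so thresholding depth < 8 m yields a robot mask; the UR5 render's mask
- # stands in for the real robot's segmentation (the Gazebo camera presumably matches the real
- # one), and dummyInpainting then paints the Panda render over those pixels in the real image.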
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
-
- def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
- return # disabled: the time-synchronized path is short-circuited; noTimeGazeboCallback handles frames instead
- start_time = time.time()
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # URDF rpy: fixed-axis roll, then pitch, then yaw (composed as Rz @ Ry @ Rx in eulerToR)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
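- # getPixels projects every point of the (camera-frame) cloud through the camera intrinsics by
- # padding the 3x4 K matrix to 4x4; since the cloud is already in the camera frame no extrinsic
- # is needed. It returns the pixel index of each point together with its depth (the z column).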
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[0],self.image_shape_[1]) # (camera_height, camera_width), matching the argument order used elsewhere
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogenous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogenous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01, 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01, 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
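- # dummyInpainting composites without depth: take the Gazebo Panda pixels via its mask, take
- # the real background with the real robot eroded out, add the two layers, and TELEA-inpaint
- # the thin ring left between the eroded real mask and the Gazebo mask
- # (better_dilated_blend_mask) before publishing the result twice in a MultipleInpaintImages.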
- def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
- rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
- if(rgb.dtype == np.float32):
- rgb = (rgb * 255).astype(np.uint8)
- # rgb = cv2.resize(rgb,(128,128))
- # seg_file = cv2.resize(seg_file,(128,128))
- # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
- gazebo_segmentation_mask_255 = gazebo_seg
- inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
- cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
- outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
- # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
- #gazebo_robot_only_lab[:,:,0] += 10
- #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
- #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- cv2.imwrite('gazebo_robot_only.png',gazebo_only)
- background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
- inverted_seg_file_original = cv2.bitwise_not(seg_file)
- #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
- inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
- #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
- background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
- cv2.imwrite('background_only.png',background_only)
- inpainted_image = gazebo_only + background_only
- #cv2.imwrite('no_fill.png',inpainted_image)
-
- better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
- cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
- cv2.imwrite('pre_inpainted_image.png',inpainted_image)
- cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
-
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- inpainted_image = cv2_inpaint_image
- cv2.imwrite('og_inpainted_image.png',inpainted_image)
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
- if(self.float_image_):
- inpainted_image = (inpainted_image / 255.0).astype(np.float32)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
- inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
- mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
- inpainted_image_msgs = MultipleInpaintImages()
- inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
- self.inpainted_publisher_.publish(inpainted_image_msgs)
- self.mask_image_publisher_.publish(mask_image_msg)
-
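- # inpainting is the depth-aware variant of dummyInpainting: both layers carry depth, zeros are
- # pushed to far values, and np.argmin over the stacked depth maps picks, per pixel, whether
- # the real image or the Gazebo render is visible.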
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- return # disabled: noTimeGazeboCallback uses dummyInpainting instead; the depth-compositing body below is kept for reference
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- depth_np = depth
- seg = seg_file
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- #rgb_np = cv2.resize(rgb_np,(128,128))
- #depth_np = cv2.resize(depth_np,(128,128))
- #seg = cv2.resize(seg,(128,128))
- #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
- real_depth_image_unmasked = depth_np
- real_rgb_image_unmasked = rgb_np
- real_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
- inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
- outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
-
- # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- real_rgb_image_masked_inpaint = real_rgb_image_masked
-
-
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 100
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
-
- inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
-
- better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
- better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- image_8bit = got_away
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
- # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- self.mask_image_publisher_.publish(mask_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) # unfilled pixels stay 0 and are remapped to a far depth (1000) downstream
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- kernel_3x3 = np.ones((3,3),np.uint8) # cv2.dilate/erode expect an ndarray kernel; a bare (3,3) tuple is not a 3x3 structuring element
- gazebo_depth_after = cv2.dilate(gazebo_depth,kernel_3x3,iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,kernel_3x3,iterations=3)
- transformed_gazebo_seg_after = cv2.dilate(transformed_gazebo_seg,kernel_3x3,iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,kernel_3x3,iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0).astype(np.uint8) # cv2.bitwise_and requires uint8; np.where defaults to int64
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- # Unreachable legacy debug path below (kept for reference):
- np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy is fixed-axis (extrinsic) XYZ; eulerToR composes R = Rz @ Ry @ Rx accordingly
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # If the mesh extent is under 1 unit it was modeled in meters; scale it to mm (all geometry is converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
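- # Sidebar: the explicit quaternion-to-matrix math in transformStampedToMatrix is
- # equivalent to scipy.spatial.transform (already imported in this file as R).
- # A minimal sketch, assuming a unit quaternion (w=q0, x=q1, y=q2, z=q3):
- #   rot = R.from_quat([q1, q2, q3, q0]).as_matrix()  # scipy expects [x, y, z, w]
- #   t = np.eye(4); t[:3, :3] = rot
- #   t[:3, 3] = 1000.0 * np.array([translation.x, translation.y, translation.z])  # mm
-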
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
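- # Sidebar: the PointCloud2 above is packed by hand (three float32 fields, so
- # point_step = 12 bytes and row_step = 12 * N). Assuming sensor_msgs_py (imported
- # above as point_cloud2), an equivalent sketch would be:
- #   point_cloud_msg = point_cloud2.create_cloud(point_cloud_msg.header, fields,
- #                                               pcd_data.astype('float32'))
-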
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
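- # Sidebar: eulerToR composes R = Rz @ Ry @ Rx, i.e. fixed-axis (extrinsic) XYZ,
- # which matches the URDF rpy convention. Assuming scipy's Rotation class, an
- # equivalent one-liner would be:
- #   from scipy.spatial.transform import Rotation
- #   Rotation.from_euler('xyz', rpy_np).as_matrix()
-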
- def replace_nan_with_neighbors_average(self,array):
- def replace_function(subarray):
- center = subarray[4]
- # The original check "center != np.nan" was always True because NaN never
- # compares equal to anything; np.isnan is the correct test.
- if(not np.isnan(center)):
- return center
- valid_values = subarray[~np.isnan(subarray)]
- if valid_values.size == 0:
- return np.nan
- else:
- return np.nanmean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
-
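- # Usage sketch for the 3x3 NaN-filling filter above (hypothetical array):
- #   depth = np.array([[1.0, np.nan], [2.0, 3.0]])
- #   filled = self.replace_nan_with_neighbors_average(depth)
- # Each NaN becomes the nanmean of its 3x3 neighborhood; valid pixels pass through.
-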
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- # Legacy offline path below is unreachable (kept for reference):
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
-
- def listenerCallbackOnlineDebug(self,msg):
- msg = msg.data_pieces[0]
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- if(rgb_np.dtype == np.float32):
- self.float_image_ = True
- rgb_np = (rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- if not os.path.exists('original_rgb'):
- os.makedirs('original_rgb')
- cv2.imwrite('original_rgb/rgb' + str(self.i_) +'.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- panda_ur5_gripper_joints = np.array(msg.joints)
- panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
- ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
- # Progressively loosen the TracIK position/orientation tolerances until a solution is found.
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz >= 0.01): # >= avoids a fragile float-equality check on the accumulated product
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- self.q_init_ = qout
- panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
- panda_ur5_gripper_joint = 0.0
- panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
- panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
- full_joint_list = []
-
- full_joint_list.extend(panda_arm_joints)
- full_joint_list.extend(panda_ur5_arm_joints)
- qout_msg = Float64MultiArray()
- qout_msg.data = full_joint_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
- self.real_qout_list_ = full_joint_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
- # else:
- # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
- # Duplicate the last value so both panda finger joints receive the same command
- msg.data.append(msg.data[-1])
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-import pathlib
-
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = True
- self.float_image_ = False
- file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_panda_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_panda_gripper_solver_ = TracIKSolver(self.panda_panda_gripper_urdf_,"panda_link0","panda_ee")
- self.panda_ur5_gripper_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_panda_gripper_urdf_).read())
- self.panda_ur5_gripper_solver_ = TracIKSolver(self.panda_ur5_gripper_urdf_,"panda_with_ur5_gripper_link0","ur5_ee_gripper")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
- self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
- self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
- self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.ur5_rgb_image_subscriber_,self.ur5_depth_image_subscriber_,self.panda_rgb_image_subscriber_,self.panda_depth_image_subscriber_],1,0.2)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- '/input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRealDataMulti,
- 'input_files_data_real',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- # Hardcoded start joint configuration (used to warm-start IK)
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
- self.real_seg_ = None
- self.real_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # NOTE: hardcoded to the 84x84 robosuite camera resolution; update if the environment's camera changes
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
-
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
- 1
- )
-
- self.ur5_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
- 1
- )
-
- self.panda_camera_color_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
- 1
- )
-
- self.panda_camera_depth_subscription_ = self.create_subscription(
- Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
- 1
- )
-
- self.joint_subscription_ = self.create_subscription(
- JointState,
- '/gazebo_joint_states',
- self.noTimeGazeboCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:] # strip the "file://" URI prefix
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
-
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
-
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
-
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
-
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
- # Guard against callbacks that fire before all four camera topics have arrived
- if real_rgb is None or real_depth is None or gazebo_rgb is None or gazebo_depth is None:
- return
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
-
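- # Sidebar: the depth threshold (depth_np < 8) used above acts as a robot
- # segmentation mask under the assumption that, in these Gazebo scenes, the robot
- # is the only geometry within 8 m of the camera; everything else renders at
- # far/infinite depth. In sketch form:
- #   robot_mask = (depth_np < 8).astype(np.uint8) * 255   # 255 = robot pixel
-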
- def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
- # Disabled: noTimeGazeboCallback drives the inpainting pipeline instead; the body below is kept for reference.
- return
- start_time = time.time()
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # URDF rpy is fixed-axis (extrinsic) XYZ; eulerToR composes R = Rz @ Ry @ Rx accordingly
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # If the mesh extent is under 1 unit it was modeled in meters; scale it to mm (all geometry is converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[1],self.image_shape_[0]) # NOTE: passes (width, height) while the function expects (height, width); harmless only while the image is square
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
- pixel_to_point_dicts (bool): if True, also return point<->pixel correspondence dicts
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogeneous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogeneous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
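- # Usage sketch (hypothetical cloud): pad the 3x3 intrinsics to 4x4 and request the
- # correspondence dicts to map points to pixels and back:
- #   cam_T = np.vstack((np.hstack((self.robosuite_intrinsic_matrix_, np.zeros((3, 1)))),
- #                      np.array([[0., 0., 0., 1.]])))
- #   pixels, pts2pix, pix2pts = self.project_points_from_world_to_camera(
- #       cloud, cam_T, 84, 84, pixel_to_point_dicts=True)
-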
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
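- # Caveat: a constant depth image makes max_depth == min_depth, so the normalization
- # above divides by zero. A guarded sketch:
- #   denom = max(max_depth - min_depth, 1e-12)
- #   normalized = ((depth_image - min_depth) / denom * 255).astype(np.uint8)
-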
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/lawrence/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
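- # Sidebar: the projection above is the standard pinhole model,
- #   u = fx * x / z + cx,   v = fy * y / z + cy,
- # applied after moving the cloud into the camera frame via inv(world_to_camera).
- # When several points land on one pixel, the last write wins; a z-buffer
- # (keep the minimum z per pixel) would be the stricter alternative.
-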
- def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
- rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
- if(rgb.dtype == np.float32):
- rgb = (rgb * 255).astype(np.uint8)
- # rgb = cv2.resize(rgb,(128,128))
- # seg_file = cv2.resize(seg_file,(128,128))
- # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
- gazebo_segmentation_mask_255 = gazebo_seg
- inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
- cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
- inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
- outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
- # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
- #gazebo_robot_only_lab[:,:,0] += 10
- #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
- #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- cv2.imwrite('gazebo_robot_only.png',gazebo_only)
- background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
- inverted_seg_file_original = cv2.bitwise_not(seg_file)
- #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
- inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
- #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
- background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
- cv2.imwrite('background_only.png',background_only)
- inpainted_image = gazebo_only + background_only
- #cv2.imwrite('no_fill.png',inpainted_image)
-
- better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255 # uint8 wrap-around makes this behave as a logical AND scaled to 255
- cv2.imwrite('better_dilated_blend_mask.png',better_dilated_blend_mask)
- cv2.imwrite('pre_inpainted_image.png',inpainted_image)
- cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
-
- # Alternative seam fill with a flat target color; computed here but immediately
- # overridden by the cv2.inpaint result below.
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- inpainted_image = cv2_inpaint_image
- cv2.imwrite('og_inpainted_image.png',inpainted_image)
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
- if(self.float_image_):
- inpainted_image = (inpainted_image / 255.0).astype(np.float32)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
- inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
- mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
- inpainted_image_msgs = MultipleInpaintImages()
- inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
- self.inpainted_publisher_.publish(inpainted_image_msgs)
- self.mask_image_publisher_.publish(mask_image_msg)
-
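- # Sidebar: dummyInpainting composites rather than inpaints in the classic sense.
- # Roughly: the rendered robot (gazebo_only) is pasted over the real background
- # (background_only, with the real robot eroded out), and only the thin seam left
- # between the two masks is filled with cv2.INPAINT_TELEA. In pseudocode:
- #   composite = gazebo_only + background_only
- #   seam = ~inverted_seg_file & ~gazebo_seg
- #   result = cv2.inpaint(composite, seam, 3, cv2.INPAINT_TELEA)
-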
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- # Disabled in favor of dummyInpainting; the body below is kept for reference.
- return
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- depth_np = depth
- seg = seg_file
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- #rgb_np = cv2.resize(rgb_np,(128,128))
- #depth_np = cv2.resize(depth_np,(128,128))
- #seg = cv2.resize(seg,(128,128))
- #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
- #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
- #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
- real_depth_image_unmasked = depth_np
- real_rgb_image_unmasked = rgb_np
- real_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
- inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
- cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
- outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
-
- # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- real_rgb_image_masked_inpaint = real_rgb_image_masked
-
-
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 100
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
-
- inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
-
- better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
- better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
- target_color = (39,43,44)
- target_mask = np.zeros_like(inpainted_image)
- target_mask[:,:] = target_color
- got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
- #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- image_8bit = got_away
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('inpainting'):
- os.makedirs('inpainting')
- if not os.path.exists('mask'):
- os.makedirs('mask')
- cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
- cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
- # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- self.mask_image_publisher_.publish(mask_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
- real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
- real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
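- # Sidebar: the depth-argmin composite above selects, per pixel, the closer of two
- # layers: index 0 is the rendered robot depth, index 1 the masked real depth, so
- #   joined_depth_argmin == 1  ->  keep the real pixel
- #   joined_depth_argmin == 0  ->  keep the rendered robot pixel
- # and the two RGB layers are multiplied by complementary 0/1 masks before adding.
-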
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the threshold so black pixels become 255 (to be replaced) and others 0,
- # matching the function's stated intent (the original "mask = thresh" replaced non-black pixels).
- mask = cv2.bitwise_not(thresh)
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) # note: the original "* 1000" was a no-op on zeros; unwritten pixels stay 0
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- gazebo_depth_after= cv2.dilate(gazebo_depth,(3,3),iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,(3,3),iterations=3)
- transformed_gazebo_seg_after= cv2.dilate(transformed_gazebo_seg,(3,3),iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,(3,3),iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
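- # Sidebar: transformGazeboImage reprojects the robot point cloud from the Gazebo
- # camera into the robosuite camera: each target pixel averages the RGB of all
- # source pixels whose points land on it, producing a sparse image whose holes are
- # then closed by dilate/erode on the depth/segmentation masks plus a TELEA inpaint.
-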
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- # Unreachable legacy debug path below (kept for reference):
- np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy is fixed-axis (extrinsic) XYZ; eulerToR composes R = Rz @ Ry @ Rx accordingly
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # If the mesh extent is under 1 unit it was modeled in meters; scale it to mm (all geometry is converted back to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
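-            # Standard quaternion (w, x, y, z) to rotation matrix expansion,
-            # written out element-wise below; the translation is scaled from
-            # meters to millimeters to match the mesh units used elsewhere.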
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
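-            # Unorganized XYZ cloud: three float32 fields (12-byte point_step),
-            # height 1, width = number of sampled points.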
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
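-            # Compose fixed-axis (extrinsic) XYZ rotations as R = Rz @ Ry @ Rx,
-            # matching the URDF rpy convention.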
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def replace_nan_with_neighbors_average(self,array):
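-        # Replace each NaN pixel with the mean of its valid 3x3 neighbors;
-        # generic_filter (assumed to be scipy.ndimage.generic_filter, imported
-        # earlier) passes each flattened 3x3 window, whose center is index 4.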
- def replace_function(subarray):
- center = subarray[4]
-            if(not np.isnan(center)):
- return center
- valid_values = subarray[subarray != -np.inf]
- if valid_values.size == 0:
- return np.nan
- else:
- return np.nanmean(valid_values)
-
- return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-            # Hardcoded linear map from the UR5e gripper reading to the Panda finger joint (coefficients presumably fit empirically)
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
-
- def listenerCallbackOnlineDebug(self,msg):
- msg = msg.data_pieces[0]
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
- if(rgb_np.dtype == np.float32):
- self.float_image_ = True
- rgb_np = (rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- if not os.path.exists('original_rgb'):
- os.makedirs('original_rgb')
- cv2.imwrite('original_rgb/rgb' + str(self.i_) +'.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
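-        # Sanitize the depth map: -inf becomes a near value (0.01), +inf a far
-        # value (10), and NaN becomes 0 so downstream masking can detect the
-        # missing pixels.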
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- panda_ur5_gripper_joints = np.array(msg.joints)
- panda_ur5_gripper_joints = panda_ur5_gripper_joints[:-1]
- ee_pose = self.panda_ur5_gripper_solver_.fk(panda_ur5_gripper_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
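-            # Progressively relax the TRAC-IK position (bx/by/bz) and
-            # orientation (brx/bry/brz) tolerances by 10x per attempt until a
-            # solution is found; fall back to the previous joint solution once
-            # the position bound reaches 1 cm.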
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
-                if(b_xyz >= 0.01):  # >= rather than == guards against floating-point drift from the repeated *10
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- self.q_init_ = qout
- panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
- panda_ur5_gripper_joint = 0.0
- panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
- panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
- full_joint_list = []
-
- full_joint_list.extend(panda_arm_joints)
- full_joint_list.extend(panda_ur5_arm_joints)
- qout_msg = Float64MultiArray()
- qout_msg.data = full_joint_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
- self.real_qout_list_ = full_joint_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
- # else:
- # qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_panda_gripper_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
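-        # Duplicate the last command value so both panda finger joints receive
-        # the same position.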
- msg.data.append(msg.data[-1])
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_real_better_early_inpaint.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_real_better_early_inpaint.py
index 0c31542..9d9a2d9 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_real_better_early_inpaint.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_panda_ur5_gripper_to_panda_gripper_real_better_early_inpaint.py
@@ -87,7 +87,7 @@ def __init__(self):
self.joint_state_msg = JointState()
         #Hardcoding start position
- self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
+ self.q_init_ = np.array([-0.53852111, 0.08224237, 0.53535473, -2.26215458, -0.0946562 ,2.28648186, -0.10421298])
self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
@@ -273,18 +273,18 @@ def noTimeGazeboCallback(self,joint_msg):
real_seg_255_np = 255 * real_seg_np
gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
gazebo_seg_255_np = 255 * gazebo_seg_np
- gazebo_no_gripper_seg_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
- gazebo_no_gripper_seg_255_np = 255 * gazebo_no_gripper_seg_np
+ real_no_gripper_seg_255_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
+ real_no_gripper_seg_255_np = 255 * real_no_gripper_seg_255_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- cv2.imwrite('gazebo_no_gripper_seg.png',gazebo_no_gripper_seg_255_np)
- import pdb
- pdb.set_trace()
+ cv2.imwrite('real_seg2.png',real_no_gripper_seg_255_np)
+
real_rgb = self.real_rgb_
real_depth = self.real_depth_
real_seg = real_seg_255_np
gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
+
+ self.dummyInpaintingEarly(real_rgb,real_seg,real_no_gripper_seg_255_np,gazebo_rgb_np,gazebo_seg_255_np)
#self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
self.updated_joints_ = False
end_time = time.time()
@@ -510,6 +510,74 @@ def convertPointcloudToDepth(self,pointcloud):
depth_image[pixel[1],pixel[0]] = point[2]
return depth_image
+    # rgb and seg_file come from the UR5 (arm + gripper), gazebo_rgb/gazebo_seg from the rendered Panda, and seg_file_no_gripper is the UR5 arm without its gripper
+ def dummyInpaintingEarly(self,rgb,seg_file,seg_file_no_gripper,gazebo_rgb,gazebo_seg):
+
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file_no_gripper = cv2.threshold(seg_file_no_gripper, 128, 255, cv2.THRESH_BINARY)
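+        # The gripper mask is the set difference between the full-robot and
+        # arm-only masks; a light erosion trims aliasing at its boundary.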
+ seg_file_gripper = abs(seg_file - seg_file_no_gripper)
+ seg_file_gripper = cv2.erode(seg_file_gripper,np.ones((3,3),np.uint8),iterations=3)
+
+ cv2.imwrite('seg_file_gripper.png',seg_file_gripper)
+ gazebo_segmentation_mask_255 = gazebo_seg
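+        # Eroding the inverted arm mask effectively dilates the arm silhouette;
+        # adding the gripper mask back and inverting yields the background
+        # region of the real image that is safe to keep.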
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file_no_gripper)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=40)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file_gripper
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
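+        # uint8 arithmetic makes this product act as a logical AND of the two
+        # inverted masks (255 * 255 * 255 wraps to 255 modulo 256), marking
+        # pixels inside the dilated UR5 silhouette but outside the rendered
+        # Panda, i.e. the region a downstream inpainter still needs to fill.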
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('inpainting'):
+ os.makedirs('inpainting')
+ if not os.path.exists('mask'):
+ os.makedirs('mask')
+ cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
+ cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image)
+ mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
+ inpainted_image_msg.header.stamp = self.get_clock().now().to_msg()
+ mask_image_msg.header.stamp = inpainted_image_msg.header.stamp
+ inpainted_image_msgs = MultipleInpaintImages()
+ inpainted_image_msgs.images = [inpainted_image_msg,inpainted_image_msg]
+ self.inpainted_publisher_.publish(inpainted_image_msgs)
+ self.mask_image_publisher_.publish(mask_image_msg)
+
def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
if(rgb.dtype == np.float32):
@@ -1092,6 +1160,7 @@ def listenerCallbackOnlineDebug(self,msg):
qout = self.q_init_
print("Bound xyz: " + str(b_xyz))
self.q_init_ = qout
+ # self.q_init_ = np.array([-0.53852111, 0.08224237, 0.53535473, -2.26215458, -0.0946562 ,2.28648186, -0.10421298])
panda_ur5_arm_joints = panda_ur5_gripper_joints.tolist()
panda_ur5_gripper_joint = 0.0
panda_ur5_gripper_joints = [panda_ur5_gripper_joint] * 6
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite.py
deleted file mode 100644
index 620300c..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite.py
+++ /dev/null
@@ -1,920 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = False
- self.panda_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_ik_robosuite.urdf"
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"world","panda_ee")
- self.ur5e_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_ik_robosuite.urdf"
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
- self.ur5e_solver_ = TracIKSolver(self.ur5e_urdf_,"world","ur5e_ee_link")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.subscription_ = self.create_subscription(
- InputFilesRobosuite,
- 'input_files',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRobosuiteData,
- 'input_files_data',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
-        #Hardcoding start position
- self.q_init_ = np.array([-0.04536656 , 0.22302045, -0.01685448, -2.57859539, 0.02532237 , 2.93147512,0.83630218])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
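-        # Expand the xacro to a URDF string by shelling out to
-        # `ros2 run xacro xacro` and reading the subprocess stdout line by line.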
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
-        # NOTE: robosuite_image_shape_ is hardcoded and must match the simulator camera resolution
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.is_ready_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
-        # URDF rpy is applied here in ZYX order (R = Rz @ Ry @ Rx; see eulerToR)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[1],self.image_shape_[0])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogenous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogenous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
- # print('Point cloud diff: ', np.linalg.norm(pointcloud_np - np.array(loaded.item()['agentview']['points'])[:, :3]))
-
- depth_data = pointcloud_np[:,2]
- print('Depth data min: ', depth_data.min())
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
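-        # Pinhole projection: u = fx * X / Z + cx, v = fy * Y / Z + cy, rounded
-        # to the nearest pixel; when several points land on the same pixel the
-        # last one written wins.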
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- # TODO(kush): Clean this up to use actual data, not file names
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- else:
-            print("Received in-memory robosuite data instead of file paths")
- rgb_np = np.array(rgb,dtype=np.uint8).reshape((seg_file.width,seg_file.height,3))
- depth_np = np.array(depth,dtype=np.float64).reshape((seg_file.width,seg_file.height))
- seg_color = self.cv_bridge_.imgmsg_to_cv2(seg_file)
- seg = cv2.cvtColor(seg_color,cv2.COLOR_BGR2GRAY)
-
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- robosuite_depth_image_unmasked = depth_np
- robosuite_rgb_image_unmasked = rgb_np
- robosuite_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
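-        # Per-pixel depth test: stack the rendered-robot depth (layer 0) and
-        # the masked real depth (layer 1), push missing (zero) values to
-        # sentinels (1000 for the render, 5 for the real layer), and let argmin
-        # decide which layer is visible at each pixel.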
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
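-        # Brighten the rendered robot by +50 on the LAB lightness channel to
-        # better match the real camera exposure (uint8 addition may wrap on
-        # already-bright pixels).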
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 50
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
-
- cv2.imshow('test', image_8bit)
- cv2.waitKey(1)
-
- if self.offline_:
- if not os.path.exists(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results'):
- os.makedirs(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results')
-
- last_slash_index = self.seg_file_.rfind('/')
- underscore_before_last_slash_index = self.seg_file_.rfind('_', 0, last_slash_index)
- str_num = self.seg_file_[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
- mask_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/mask' + str_num +'.png'
- attempt_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/attempt' + str_num +'.png'
- cv2.imwrite(inpaint_file,image_8bit)
- cv2.imwrite(mask_file,gazebo_segmentation_mask_255)
- cv2.imwrite(attempt_file,attempt)
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- return
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
-        gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))  # zeros mark pixels with no projected depth
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
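-        # Dilate-then-erode (morphological closing) fills the pinholes left by
-        # the sparse point-to-pixel projection above.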
- gazebo_depth_after= cv2.dilate(gazebo_depth,(3,3),iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,(3,3),iterations=3)
- transformed_gazebo_seg_after= cv2.dilate(transformed_gazebo_seg,(3,3),iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,(3,3),iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- np.save('/home/benchturtle/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
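-            # Flood-fill from the top-left corner (assumed background), invert,
-            # and OR with the original mask to close interior holes in the
-            # robot silhouette.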
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/benchturtle/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/benchturtle/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-            print("Finished inpainting pass")
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
-            # URDF rpy is applied here in ZYX order (R = Rz @ Ry @ Rx; see eulerToR)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
-            print("Setting up combined robot mesh")
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.offline_ = True
- start_time = time.time()
- # path_array = msg.rgb.split('/')
- # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- rgb_image = cv2.imread(msg.rgb)
- # cv2.imwrite(input_rgb_path,rgb_image)
- rgb_array = rgb_image.flatten().tolist()
- depth_image = np.load(msg.depth)
- # path_array = msg.depth.split('/')
- # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_depth_path,depth_image)
- depth_array = depth_image.flatten().tolist()
- self.seg_file_ = msg.segmentation
- segmentation_image = cv2.imread(msg.segmentation)
- # path_array = msg.segmentation.split('/')
- # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # cv2.imwrite(input_segmentation_path,segmentation_image)
- segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- joint_array = np.load(msg.joints).tolist()
- # path_array = msg.joints.split('/')
- # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # np.save(input_joints_path,np.array(joint_array))
- input_file_robosuite_data_msg = InputFilesRobosuiteData()
- input_file_robosuite_data_msg.rgb = rgb_array
- input_file_robosuite_data_msg.depth_map = depth_array
- input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- input_file_robosuite_data_msg.joints = joint_array
- self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- return
- rgb = msg.rgb
- depth = msg.depth
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.load(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- print(gripper)
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb,depth,segmentation)
- end_time = time.time()
- print("Total time: " + str(end_time - start_time) + " s")
-
- def listenerCallbackOnlineDebug(self,msg):
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
- rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- cv2.imwrite(online_input_num_folder+'/rgb.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.segmentation.width,msg.segmentation.height))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- seg_color = self.cv_bridge_.imgmsg_to_cv2(msg.segmentation)
- seg = cv2.cvtColor(seg_color,cv2.COLOR_BGR2GRAY)
- cv2.imwrite(online_input_num_folder+'/seg.png',seg)
- np.save(online_input_num_folder+'/joints.npy',np.array(msg.joints))
- rgb = msg.rgb
- depth = msg.depth_map
- segmentation = msg.segmentation
- joints = msg.joints
- joint_array = np.array(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
-
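- # TracIK can fail to find a solution; only advance the warm-start seed (q_init_) on success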
- if qout is not None:
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- print(gripper)
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- else:
- print("WARNING HIT IK ON FRANKA ERROR")
-
- self.setupMeshes(rgb,depth,segmentation)
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
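- # Duplicate the last command so both panda_finger_joint1 and panda_finger_joint2 receive the gripper value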
- msg.data.append(msg.data[-1])
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py
index 071e0f5..64b1e60 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py
@@ -2,13 +2,12 @@
import rclpy
from rclpy.node import Node
+import trimesh
from std_msgs.msg import Header, Float64MultiArray,Bool
from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
import numpy as np
from launch_ros.substitutions import FindPackageShare
from launch.substitutions import PathJoinSubstitution
-import trimesh
-import open3d as o3d
import os
import glob
import subprocess
@@ -28,7 +27,6 @@
from tracikpy import TracIKSolver
from mdh.kinematic_chain import KinematicChain
from mdh import UnReachable
-import kinpy as kp
from geometry_msgs.msg import Vector3, Quaternion
from scipy.spatial.transform import Rotation as R
from message_filters import ApproximateTimeSynchronizer, Subscriber
@@ -39,10 +37,13 @@ def __init__(self):
self.is_ready_ = False
self.thetas_ = None
self.debug_ = False
- self.panda_urdf_ = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../../../../src/gazebo_env/description/urdf/panda_ik_robosuite.urdf')
+
+ # Resolve URDFs from the package's installed share directory (not the source tree)
+ current_filepath = os.path.dirname(os.path.abspath(__file__))
+ self.panda_urdf_ = current_filepath + "/../../share/gazebo_env/urdf/panda_ik_robosuite.urdf"
self.panda_solver_ = TracIKSolver(self.panda_urdf_,"world","panda_ee")
- self.ur5e_urdf_ = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../../../../src/gazebo_env/description/urdf/ur5e_ik_robosuite.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
+ self.ur5e_urdf_ = current_filepath + "/../../share/gazebo_env/urdf/ur5e_ik_robosuite.urdf"
self.ur5e_solver_ = TracIKSolver(self.ur5e_urdf_,"world","ur5e_ee_link")
# real_camera_link to world and then multiply translation by 1000
@@ -81,26 +82,8 @@ def __init__(self):
self.joint_state_msg = JointState()
#Hardcoding start position
- # self.q_init_ = np.array([-0.04536656, 0.22302045, -0.01685448, -2.57859539, 0.02532237, 2.93147512, 0.83630218])
- self.q_init_ = np.array([-0.0644708305, 0.528892205, -0.0626163565, 1.85382987, 0.0547194409, 0.614586870, -1.73115559])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
+ self.q_init_ = np.array([-0.04536656, 0.22302045, -0.01685448, -2.57859539, 0.02532237, 2.93147512, 0.83630218])
+
self.publishers_ = []
self.subscribers_ = []
self.timers_ = []
@@ -135,40 +118,12 @@ def __init__(self):
timer_period = 0.5
self.links_info_ = []
self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
#self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
self.updated_joints_ = False
self.is_ready_ = True
- print("WE READY")
def gazeboCallback(self,joints,gazebo_rgb,gazebo_depth):
joint_command_np = np.array(self.robosuite_qout_list_)
@@ -412,10 +367,10 @@ def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
inverted_joined_depth_argmin = 1 - joined_depth_argmin
- # gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- # gazebo_robot_only_lab[:,:,0] += 50
- # gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- # gazebo_robot_only_rgb = gazebo_robot_only_mod
+ # gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
+ # gazebo_robot_only_lab[:,:,0] += 50
+ # gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ # gazebo_robot_only_rgb = gazebo_robot_only_mod
attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
inpainted_image = attempt + attempt2
image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
@@ -851,8 +806,6 @@ def listenerCallbackOnlineDebug(self,msg):
ee_pose = ee_pose @ end_effector_rotation_with_no_translation
scipy_rotation = R.from_matrix(ee_pose[:3,:3])
scipy_quaternion = scipy_rotation.as_quat()
- # import pdb
- # pdb.set_trace()
qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
b_xyz = 1e-5
b_rpy = 1e-3
@@ -918,7 +871,6 @@ def listenerCallbackData(self,msg):
qout_msg.data = qout_list
self.panda_joint_command_publisher_.publish(qout_msg)
self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
# end_time = time.time()
# print("Total time: " + str(end_time - start_time) + " s")
@@ -937,7 +889,6 @@ def joint_commands_callback(self, msg):
# msg.data.append(gripper_val)
self.joint_state_msg.position = msg.data
self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
self.panda_joint_state_publisher_.publish(self.joint_state_msg)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_diffusion_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_diffusion_better.py
deleted file mode 100644
index 1d1aa9b..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_diffusion_better.py
+++ /dev/null
@@ -1,1038 +0,0 @@
-#!/usr/bin/env python3
-
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesFrankaToFranka, InputFilesDiffusionData
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = False
- self.panda_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_ik_robosuite.urdf"
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"world","panda_ee")
- self.ur5e_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_ik_robosuite.urdf"
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
- self.ur5e_solver_ = TracIKSolver(self.ur5e_urdf_,"world","ur5e_ee_link")
-
- # real_camera_link to world and then multiply translation by 1000
- # self.camera_to_world_ = np.array([[0,1,0,0],
- # [0.258,0,-0.966,989],
- # [-0.966,0,-0.258,1919],
- # [0,0,0,1]])
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[309.01933598, 0. , 128. ],
- [ 0. , 309.01933598, 128. ],
- [ 0. , 0. , 1. ]])
- self.panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.hdf5_msg_ = None
- self.joint_state_subscriber_ = self.create_subscription(JointState,'/gazebo_joint_states',self.gazeboCallback,1)
- self.pdb_debug_ = False
- self.gazebo_rgb_image_subscriber_ = Subscriber(self,Image,'/depth_camera/image_raw')
- self.gazebo_depth_image_subscriber_ = Subscriber(self,Image,'/depth_camera/depth/image_raw')
- self.gazebo_image_time_synchronizer_ = TimeSynchronizer([self.gazebo_rgb_image_subscriber_,self.gazebo_depth_image_subscriber_],1)
- self.gazebo_image_time_synchronizer_.registerCallback(self.gazeboImageCallback)
- self.subscription_ = self.create_subscription(
- InputFilesFrankaToFranka,
- '/input_data',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesDiffusionData,
- 'input_files_data',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
- #Hardcoding start position
- self.q_init_ = np.array([-0.04536656, 0.22302045, -0.01685448, -2.57859539, 0.02532237, 2.93147512, 0.83630218])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.robosuite_rgb_ = None
- self.robosuite_depth_ = None
- self.robosuite_seg_ = None
- self.robosuite_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
- # REMEMBER TO HARDCODE THIS
- self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
- #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.gazebo_rgb_ = None
- self.gazebo_depth_ = None
- self.is_ready_ = True
-
- def gazeboImageCallback(self,gazebo_rgb,gazebo_depth):
- self.gazebo_rgb_ = gazebo_rgb
- self.gazebo_depth_ = gazebo_depth
-
- def gazeboCallback(self,joints):
- print("Publishing Demo " + str(self.hdf5_msg_.demo_num.data) + " " + str(self.hdf5_msg_.traj_num.data))
- gazebo_rgb = None
- gazebo_depth = None
- while(gazebo_rgb is None):
- gazebo_rgb = self.gazebo_rgb_
- gazebo_depth = self.gazebo_depth_
- print("Gazebo images offline")
-
- joint_command_np = np.array(self.robosuite_qout_list_)
- gazebo_joints_np = np.array(joints.position)
- start_time = time.time()
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
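- # Treat depth < 8 as the rendered robot; everything farther is background in the segmentation mask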
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- robosuite_rgb = self.robosuite_rgb_
- robosuite_depth = self.robosuite_depth_
- robosuite_seg = self.robosuite_seg_
-
- self.inpainting(robosuite_rgb,robosuite_depth,robosuite_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
- end_time = time.time()
- print("Algo time: " + str(end_time - start_time) + " seconds")
- return
- #if(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01):
-
- #elif(np.linalg.norm(joint_command_np - gazebo_joints_np,2) < 0.01 and not self.updated_joints_):
- # self.updated_joints_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # URDF rpy is fixed-axis roll-pitch-yaw, composed as R = Rz @ Ry @ Rx
- mesh_scene = trimesh.load(filename)
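- # Special-case panda_link6: offset every sub-mesh except Shell006 along -z (likely to realign the visual origin)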
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[1],self.image_shape_[0])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
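-
- Example (illustrative): a point 1 m in front of an identity camera lands at pixel (0, 0)
- points = np.array([[0.0, 0.0, 1.0]])
- pixels = self.project_points_from_world_to_camera(points, np.eye(4), 84, 84)
- # pixels -> array([[0, 0]])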
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogenous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogenous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- # loaded = np.load('/home/benchturtle/cross_embodiment_ws/src/gazebo_env/robotsuite_outputs/UR5e_output3.npy', allow_pickle=True)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
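- # Pinhole projection: u = fx*x/z + cx, v = fy*y/z + cy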
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def simpleInpainting(self,hdf5_msg,gazebo_rgb,gazebo_seg,gazebo_depth):
- mask_image = gazebo_seg
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(gazebo_rgb)
- gazebo_masked_image = cv2.bitwise_and(gazebo_rgb,gazebo_rgb, mask=mask_image)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- hdf5_rgb = self.cv_bridge_.imgmsg_to_cv2(hdf5_msg.rgb)
- inverted_mask_image = cv2.bitwise_not(mask_image)
- hdf5_background = cv2.bitwise_and(hdf5_rgb,hdf5_rgb,mask=inverted_mask_image)
- inpainted_image = gazebo_masked_image + hdf5_background
- image_8bit = cv2.convertScaleAbs(inpainted_image)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- online_input_folder = 'franka_to_franka'
- demo_input_folder = online_input_folder + '/demo_' + str(hdf5_msg.demo_num.data)
- inpaint_file_path = demo_input_folder + '/inpaint' + str(hdf5_msg.traj_num.data) + '.png'
- cv2.imwrite(inpaint_file_path,image_8bit)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- # TODO(kush): Clean this up to use actual data, not file names
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
- print("I SHOULDNT BE THERE - NO I SHOULD BE HERE!!!!!!!!!!!!!!")
- rgb_np = np.array(rgb,dtype=np.uint8).reshape((seg_file.width,seg_file.height,3))
- depth_np = np.array(depth,dtype=np.float64).reshape((seg_file.width,seg_file.height))
- seg_color = self.cv_bridge_.imgmsg_to_cv2(seg_file)
- seg = cv2.cvtColor(seg_color,cv2.COLOR_BGR2GRAY)
-
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
-
- robosuite_depth_image_unmasked = depth_np
- robosuite_rgb_image_unmasked = rgb_np
- cv2.imwrite('/home/benchturtle/original_rgb.png',robosuite_rgb_image_unmasked)
- robosuite_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
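- # Zeros mark invalid depth: push Gazebo holes far (1000) and robosuite holes near (5) so the
- # per-pixel argmin z-test picks Gazebo robot pixels (index 0) over the robosuite background (index 1)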
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 50
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- if self.offline_:
- if not os.path.exists(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results'):
- os.makedirs(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results')
-
- last_slash_index = self.seg_file_.rfind('/')
- underscore_before_last_slash_index = self.seg_file_.rfind('_', 0, last_slash_index)
- str_num = self.seg_file_[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
- mask_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/mask' + str_num +'.png'
- attempt_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/attempt' + str_num +'.png'
- cv2.imwrite(inpaint_file,image_8bit)
- cv2.imwrite(mask_file,gazebo_segmentation_mask_255)
- cv2.imwrite(attempt_file,attempt)
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- online_input_folder = 'franka_to_franka'
- demo_input_folder = online_input_folder + '/demo_' + str(self.hdf5_msg_.demo_num.data)
- inpaint_file_path = demo_input_folder + '/inpaint' + str(self.hdf5_msg_.traj_num.data) + '.png'
- cv2.imwrite(inpaint_file_path,image_8bit)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
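- # For every masked pixel, take the most frequent (mode) color in its kernel_size x kernel_size neighborhood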
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
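- # Reproject the Gazebo pointcloud through both camera models to map Gazebo pixels onto robosuite pixels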
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) * 1000
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- gazebo_depth_after= cv2.dilate(gazebo_depth,(3,3),iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,(3,3),iterations=3)
- transformed_gazebo_seg_after= cv2.dilate(transformed_gazebo_seg,(3,3),iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,(3,3),iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- np.save('/home/benchturtle/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/benchturtle/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/benchturtle/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
- print("I am the Senate")
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # URDF rpy is fixed-axis roll-pitch-yaw, composed as R = Rz @ Ry @ Rx
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
- # REQUIRES CAMERA TO BE IN GAZEBO SCENE
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
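- # Standard unit-quaternion to rotation-matrix expansion; translation is converted from meters to millimeters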
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def setupMeshes(self,rgb,depth,segmentation):
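- # Pose each link mesh with the current FK, merge them, sample a pointcloud, and publish it for masking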
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.listenerCallbackOnlineDebug(msg)
- return
-
- def listenerCallbackOnlineDebug(self,msg):
- self.i_ += 1
- online_input_folder = 'franka_to_franka'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- demo_input_folder = online_input_folder + '/demo_' + str(msg.demo_num.data)
- if not os.path.exists(demo_input_folder):
- os.makedirs(demo_input_folder)
- joints = msg.joints
- joint_array = np.array(joints)
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- if(msg.demo_num.data == 1):
- self.pdb_debug_ = True
- if qout is not None:
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
-
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.robosuite_rgb_ = msg.rgb
- self.robosuite_depth_ = msg.depth_map
- self.robosuite_seg_ = msg.segmentation
- self.robosuite_qout_list_ = qout_list
- self.hdf5_msg_ = msg
- else:
- print("WARNING HIT IK ON FRANKA ERROR")
-
- # def listenerCallback(self,msg):
- # if self.is_ready_:
- # self.offline_ = True
- # start_time = time.time()
- # # path_array = msg.rgb.split('/')
- # # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # # if not os.path.exists(online_input_folder):
- # # os.makedirs(online_input_folder)
- # # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # # if not os.path.exists(online_input_num_folder):
- # # os.makedirs(online_input_num_folder)
- # # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # rgb_image = cv2.imread(msg.rgb)
- # # cv2.imwrite(input_rgb_path,rgb_image)
- # rgb_array = rgb_image.flatten().tolist()
- # depth_image = np.load(msg.depth)
- # # path_array = msg.depth.split('/')
- # # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # # np.save(input_depth_path,depth_image)
- # depth_array = depth_image.flatten().tolist()
- # self.seg_file_ = msg.segmentation
- # segmentation_image = cv2.imread(msg.segmentation)
- # # path_array = msg.segmentation.split('/')
- # # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # # cv2.imwrite(input_segmentation_path,segmentation_image)
- # segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- # joint_array = np.load(msg.joints).tolist()
- # # path_array = msg.joints.split('/')
- # # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # # np.save(input_joints_path,np.array(joint_array))
- # input_file_robosuite_data_msg = InputFilesRobosuiteData()
- # input_file_robosuite_data_msg.rgb = rgb_array
- # input_file_robosuite_data_msg.depth_map = depth_array
- # input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- # input_file_robosuite_data_msg.joints = joint_array
- # self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- # return
- # rgb = msg.rgb
- # depth = msg.depth
- # segmentation = msg.segmentation
- # joints = msg.joints
- # joint_array = np.load(joints)
- # gripper = joint_array[-1]
- # joint_array = joint_array[:-1]
-
- # ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- # scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- # scipy_quaternion = scipy_rotation.as_quat()
- # qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- # self.q_init_ = qout
- # # Hardcoded gripper
- # qout_list = qout.tolist()
- # print(gripper)
- # panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- # qout_list.append(panda_gripper_command)
- # qout_msg = Float64MultiArray()
- # qout_msg.data = qout_list
- # self.panda_joint_command_publisher_.publish(qout_msg)
- # self.joint_commands_callback(qout_msg)
- # self.setupMeshes(rgb,depth,segmentation)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
- # def listenerCallbackOnlineDebug(self,msg):
- # self.i_ += 1
- # online_input_folder = 'offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- # cv2.imwrite(online_input_num_folder+'/rgb.png',rgb_np)
- # depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.segmentation.width,msg.segmentation.height))
- # np.save(online_input_num_folder+'/depth.npy',depth_np)
- # seg_color = self.cv_bridge_.imgmsg_to_cv2(msg.segmentation)
- # seg = cv2.cvtColor(seg_color,cv2.COLOR_BGR2GRAY)
- # cv2.imwrite(online_input_num_folder+'/seg.png',seg)
- # np.save(online_input_num_folder+'/joints.npy',np.array(msg.joints))
- # rgb = msg.rgb
- # depth = msg.depth_map
- # segmentation = msg.segmentation
- # joints = msg.joints
- # joint_array = np.array(joints)
- # gripper = joint_array[-1]
- # joint_array = joint_array[:-1]
-
- # ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- # scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- # scipy_quaternion = scipy_rotation.as_quat()
- # qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
-
- # if qout is not None:
- # self.q_init_ = qout
- # # Hardcoded gripper
- # qout_list = qout.tolist()
-
- # panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- # qout_list.append(panda_gripper_command)
- # qout_msg = Float64MultiArray()
- # qout_msg.data = qout_list
- # self.panda_joint_command_publisher_.publish(qout_msg)
- # self.joint_commands_callback(qout_msg)
- # self.robosuite_rgb_ = msg.rgb
- # self.robosuite_depth_ = msg.depth_map
- # self.robosuite_seg_ = msg.segmentation
- # self.robosuite_qout_list_ = qout_list
- # else:
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
- rgb = msg.rgb
- rgb = np.array(rgb) \
- .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-
-
- depth_map = msg.depth_map
- depth_map = np.array(depth_map) \
- .reshape(segmentation_data.shape)
-
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
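-        # The incoming command carries a single gripper value; duplicate it so both
-        # panda_finger_joint1 and panda_finger_joint2 receive the same position.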
- msg.data.append(msg.data[-1])
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_franka_to_franka.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_franka_to_franka.py
deleted file mode 100644
index a56c0a7..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_franka_to_franka.py
+++ /dev/null
@@ -1,854 +0,0 @@
-#!/usr/bin/env python3
-
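-"""Gazebo write-data node for the robosuite Franka-to-Franka pipeline.
-
-Sketch of the node's role as reconstructed from the code below: it republishes
-recorded demo joint states for Gazebo, projects the Panda's URDF meshes into the
-camera, and composites the rendered robot over the demo frames.
-"""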
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesFrankaToFranka
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = False
- self.panda_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_ik_robosuite.urdf"
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"world","panda_ee")
- self.ur5e_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_ik_robosuite.urdf"
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
- self.ur5e_solver_ = TracIKSolver(self.ur5e_urdf_,"world","ur5e_ee_link")
-
- # real_camera_link to world and then multiply translation by 1000
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.subscription_ = self.create_subscription(
- InputFilesFrankaToFranka,
- '/input_data',
- self.listenerCallback,
- 1)
- self.joint_state_msg = JointState()
-
-        # Hardcoding start position
- self.q_init_ = np.array([-0.04536656 , 0.22302045, -0.01685448, -2.57859539, 0.02532237 , 2.93147512,0.83630218])
-
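-        # Expand the robot xacro into a URDF string via the xacro CLI, then walk the XML to
-        # collect each link's visual mesh file and origin (rpy/xyz) offsets.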
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
-        # REMEMBER TO HARDCODE THIS: must match the robosuite camera render size
-        self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.is_ready_ = True
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # RPY is in ZYX I'm pretty sure
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
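-        # (illustrative: a link mesh whose largest extent is ~0.3 units is in meters and
-        # gets scaled by 1000 to millimetres below)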
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
-            # image_shape_ is (height, width, 3); the helper expects (camera_height, camera_width)
-            pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[0],self.image_shape_[1])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogenous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogenous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
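-    # Note: because of the final coordinate swap above, the returned array holds
-    # (row, col) pixel indices for an image of shape (camera_height, camera_width).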
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
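-        # Moves the world-frame cloud into the hard-coded robosuite camera frame, then
-        # rasterizes it with the pinhole model (u = fx*x/z + cx, v = fy*y/z + cy) into a
-        # depth image of self.robosuite_image_shape_; later points overwrite earlier ones.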
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- # TODO(kush): Clean this up to use actual data, not file names
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- else:
- rgb_np = np.array(rgb,dtype=np.uint8).reshape((seg_file.width,seg_file.height,3))
- depth_np = np.array(depth,dtype=np.float64).reshape((seg_file.width,seg_file.height))
- seg_color = self.cv_bridge_.imgmsg_to_cv2(seg_file)
- seg = cv2.cvtColor(seg_color,cv2.COLOR_BGR2GRAY)
-
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
- robosuite_depth_image_unmasked = depth_np
- robosuite_rgb_image_unmasked = rgb_np
- robosuite_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
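-        # Per-pixel depth test: channel 0 is the Gazebo robot render, channel 1 the masked
-        # scene. Zeros mean "no data", so they are replaced with sentinels (1000 = far for
-        # Gazebo, 5 for the scene) before argmin picks the nearer source at each pixel.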
-
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 50
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
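-        # Brightens the Gazebo render by +50 on the LAB L channel to better match the
-        # robosuite lighting; note that uint8 addition can wrap for already-bright pixels.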
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
-
- if self.offline_:
- if not os.path.exists(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results'):
- os.makedirs(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results')
-
- last_slash_index = self.seg_file_.rfind('/')
- underscore_before_last_slash_index = self.seg_file_.rfind('_', 0, last_slash_index)
- str_num = self.seg_file_[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
- mask_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/mask' + str_num +'.png'
- attempt_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/attempt' + str_num +'.png'
- cv2.imwrite(inpaint_file,image_8bit)
- cv2.imwrite(mask_file,gazebo_segmentation_mask_255)
- cv2.imwrite(attempt_file,attempt)
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- return
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
- gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1])) * 1000
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- gazebo_depth_after= cv2.dilate(gazebo_depth,(3,3),iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,(3,3),iterations=3)
- transformed_gazebo_seg_after= cv2.dilate(transformed_gazebo_seg,(3,3),iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,(3,3),iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,hdf5_msg):
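-        # Projects the sampled robot point cloud into the camera, rasterizes robot mask and
-        # depth images, and pastes the Gazebo-rendered robot over the stored demo frame.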
- if(self.is_ready_):
-            if (self.camera_intrinsic_matrix_ is None) or (getattr(self, 'original_image_', None) is None):
-                # Need camera intrinsics and at least one Gazebo render before compositing
-                return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- if(self.original_image_ is not None):
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- hdf5_rgb = self.cv_bridge_.imgmsg_to_cv2(hdf5_msg.rgb)
- inverted_mask_image = cv2.bitwise_not(mask_image)
- hdf5_background = cv2.bitwise_and(hdf5_rgb,hdf5_rgb,mask=inverted_mask_image)
- inpainted_image = gazebo_masked_image + hdf5_background
- image_8bit = cv2.convertScaleAbs(inpainted_image)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- online_input_folder = 'franka_to_franka'
- demo_input_folder = online_input_folder + '/demo_' + str(hdf5_msg.demo_num.data)
- inpaint_file_path = demo_input_folder + '/inpaint' + str(hdf5_msg.traj_num.data) + '.png'
- cv2.imwrite(inpaint_file_path,image_8bit)
- return
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
- # RPY is in ZYX I'm pretty sure
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
-
- def transformStampedToMatrix(self,rotation,translation):
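-        # Standard unit-quaternion (w, x, y, z) to rotation-matrix expansion; the translation
-        # is scaled to millimetres to match the mesh units used throughout this node.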
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
- def setupMeshes(self,msg):
- if(self.is_ready_):
- print("HERE")
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
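-            # Unorganized XYZ cloud: three float32 fields, hence point_step = 3 * 4 = 12 bytes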
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,msg)
-
- def eulerToR(self,rpy_np):
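-        # Fixed-axis XYZ (URDF rpy) convention: R = Rz @ Ry @ Rx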
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.listenerCallbackOnlineDebug(msg)
- return
-
- def listenerCallbackOnlineDebug(self,msg):
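-        # Live path: forwards the recorded joints straight to the Panda publishers (no
-        # cross-embodiment IK) and triggers mesh projection; output folders are created
-        # under franka_to_franka/ per demo.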
- self.i_ += 1
- online_input_folder = 'franka_to_franka'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- demo_input_folder = online_input_folder + '/demo_' + str(msg.demo_num.data)
- if not os.path.exists(demo_input_folder):
- os.makedirs(demo_input_folder)
- qout_msg = Float64MultiArray()
- qout_msg.data = np.array(msg.joints).tolist()
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(msg)
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
-            rgb = np.array(msg.rgb) \
-                .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-            depth_map = np.array(msg.depth_map) \
-                .reshape(segmentation_data.shape)
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
-            ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
-            qout = self.panda_solver_.ik(ee_pose, qinit=self.q_init_)
-            if qout is None:
-                # IK failed; keep the previous seed and skip this frame
-                print("WARNING HIT IK ON FRANKA ERROR")
-                return
-            self.q_init_ = qout
-
-            # Hardcoded gripper: linear map from the source gripper joint to the Panda finger joint
-            qout_list = qout.tolist()
-            panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
-            qout_list.append(panda_gripper_command)
-            qout_msg = Float64MultiArray()
-            qout_msg.data = qout_list
-            self.panda_joint_command_publisher_.publish(qout_msg)
-            self.joint_commands_callback(qout_msg)
-            # setupMeshes in this node consumes the full input message (see its definition above)
-            self.setupMeshes(msg)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
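-        # The incoming command carries a single gripper value; duplicate it so both
-        # panda_finger_joint1 and panda_finger_joint2 receive the same position.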
- msg.data.append(msg.data[-1])
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_franka_to_franka_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_franka_to_franka_better.py
deleted file mode 100644
index 5df2fb2..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_franka_to_franka_better.py
+++ /dev/null
@@ -1,998 +0,0 @@
-#!/usr/bin/env python3
-
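-"""Improved ("better") Gazebo write-data node for the Franka-to-Franka pipeline.
-
-Sketch as reconstructed from the code below: instead of projecting URDF meshes,
-this variant time-synchronizes Gazebo's own RGB/depth renders with its joint
-states and composites the rendered robot over the recorded demo frames.
-"""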
-import rclpy
-from rclpy.node import Node
-import trimesh
-import open3d as o3d
-from std_msgs.msg import Header, Float64MultiArray,Bool
-from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
-import numpy as np
-from launch_ros.substitutions import FindPackageShare
-from launch.substitutions import PathJoinSubstitution
-import os
-import glob
-import subprocess
-import xml.etree.ElementTree as ET
-from functools import partial
-import math
-from message_filters import TimeSynchronizer, Subscriber
-from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
-from sensor_msgs_py import point_cloud2
-import cv2
-from cv_bridge import CvBridge
-import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesFrankaToFranka
-from sensor_msgs.msg import JointState
-from tracikpy import TracIKSolver
-from mdh.kinematic_chain import KinematicChain
-from mdh import UnReachable
-import kinpy as kp
-from geometry_msgs.msg import Vector3, Quaternion
-from scipy.spatial.transform import Rotation as R
-from message_filters import ApproximateTimeSynchronizer, Subscriber
-
-class WriteData(Node):
- def __init__(self):
- super().__init__('write_data_node')
- self.is_ready_ = False
- self.thetas_ = None
- self.debug_ = False
- self.panda_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_ik_robosuite.urdf"
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"world","panda_ee")
- self.ur5e_urdf_ = "/home/benchturtle/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_ik_robosuite.urdf"
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
- self.ur5e_solver_ = TracIKSolver(self.ur5e_urdf_,"world","ur5e_ee_link")
-
- # real_camera_link to world and then multiply translation by 1000
- self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
- [0.706, 0.000 ,-0.708, 603],
- [-0.708, 0.000, -0.706 , 1307],
- [0,0,0,1]])
-
- self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
- [ 0. , 101.39696962, 42. ],
- [ 0. , 0. , 1. ]])
- self.panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
- self.panda_joint_state_publisher_ = self.create_publisher(
- JointState,
- '/joint_states', # Topic name for /joint_states
- 1)
- self.hdf5_msg_ = None
- self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
- self.gazebo_rgb_image_subscriber_ = Subscriber(self,Image,'/depth_camera/image_raw')
- self.gazebo_depth_image_subscriber_ = Subscriber(self,Image,'/depth_camera/depth/image_raw')
- self.gazebo_time_synchronizer_ = ApproximateTimeSynchronizer([self.joint_state_subscriber_,self.gazebo_rgb_image_subscriber_,self.gazebo_depth_image_subscriber_],1,1)
- self.gazebo_time_synchronizer_.registerCallback(self.gazeboCallback)
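-        # Fires the callback only when Gazebo's joint states and its RGB/depth renders carry
-        # approximately matching stamps (queue size 1, 1 s slop), so the composite uses a
-        # consistent robot pose.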
- self.subscription_ = self.create_subscription(
- InputFilesFrankaToFranka,
- '/input_data',
- self.listenerCallback,
- 1)
- self.subscription_data_ = self.create_subscription(
- InputFilesRobosuiteData,
- 'input_files_data',
- self.listenerCallbackOnlineDebug,
- 1)
- self.joint_state_msg = JointState()
-
-        # Hardcoding start position
- self.q_init_ = np.array([-0.04536656 , 0.22302045, -0.01685448, -2.57859539, 0.02532237 , 2.93147512,0.83630218])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
- self.publishers_ = []
- self.subscribers_ = []
- self.timers_ = []
- self.robosuite_rgb_ = None
- self.robosuite_depth_ = None
- self.robosuite_seg_ = None
- self.robosuite_qout_list_ = None
- self.i_ = -1
- self.offline_ = False
- self.seg_file_ = None
- self.tf_buffer_ = Buffer()
- self.tf_listener_ = TransformListener(self.tf_buffer_, self)
- self.camera_intrinsic_matrix_ = None
- self.image_shape_ = None
-        # REMEMBER TO HARDCODE THIS: must match the robosuite camera render size
-        self.robosuite_image_shape_ = (84,84)
- self.camera_intrinsic_subscription_ = self.create_subscription(
- CameraInfo,
- '/camera/camera_info',
- self.cameraInfoCallback,
- 1
- )
- self.camera_color_subscription_ = self.create_subscription(
- Image,
- '/camera/image_raw',
- self.cameraCallback,
- 1
- )
- self.cv_bridge_ = CvBridge()
- self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
- self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
- timer_period = 0.5
- self.links_info_ = []
- self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
- self.updated_joints_ = False
- self.is_ready_ = True
-
- def gazeboCallback(self,joints,gazebo_rgb,gazebo_depth):
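-        # Synchronized Gazebo callback: segment the rendered robot by depth and composite
-        # it over the stored demo frame.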
-        if self.hdf5_msg_ is None:
-            # No demo frame has arrived yet; nothing to composite
-            return
-        start_time = time.time()
-        gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-        gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
-        # Depth-threshold segmentation: pixels nearer than 8 m are treated as robot
-        # (the render presumably shows only the robot against an empty background)
-        gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
-        gazebo_seg_255_np = 255 * gazebo_seg_np
-        self.simpleInpainting(self.hdf5_msg_,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
-        self.updated_joints_ = False
-        end_time = time.time()
-        print("Algo time: " + str(end_time - start_time) + " seconds")
-
- def cameraInfoCallback(self,msg):
- if(self.is_ready_):
- self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
- self.image_shape_ = (msg.height,msg.width,3)
-
- def cameraCallback(self,msg):
- if(self.is_ready_):
- self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
- #cv2.imwrite('gazebo_rgb.png',self.original_image_)
-
- def prelimMeshFast(self,filename,link_name,rpy_str,xyz_str):
- # RPY is in ZYX I'm pretty sure
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
- # Checks to make sure units are in mm instead of m (We convert uniformly to meters later)
-
- if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
- return open3d_mesh
-
- def getPixels(self,msg):
- if(self.is_ready_):
- msg_data = point_cloud2.read_points(msg)
- msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
- depth_data = msg_data[:,2]
- camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
-            # image_shape_ is (height, width, 3); the helper expects (camera_height, camera_width)
-            pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[0],self.image_shape_[1])
- return pixel_data, depth_data
-
- def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
- """
- Helper function to project a batch of points in the world frame
- into camera pixels using the world to camera transformation.
-
- Args:
- points (np.array): 3D points in world frame to project onto camera pixel locations. Should
- be shape [..., 3].
- world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel
- coordinates.
- camera_height (int): height of the camera image
- camera_width (int): width of the camera image
-
- Return:
- pixels (np.array): projected pixel indices of shape [..., 2]
- """
- assert points.shape[-1] == 3 # last dimension must be 3D
- assert len(world_to_camera_transform.shape) == 2
- assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
-
- # convert points to homogenous coordinates -> (px, py, pz, 1)
- ones_pad = np.ones(points.shape[:-1] + (1,))
- points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
-
- # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
- mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
- cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
- pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
-
- # re-scaling from homogenous coordinates to recover pixel values
- # (x, y, z) -> (x / z, y / z)
- pixels = pixels / pixels[..., 2:3]
- pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
- # swap first and second coordinates to get pixel indices that correspond to (height, width)
- # and also clip pixels that are out of range of the camera image
- pixels = np.concatenate(
- (
- pixels[..., 1:2].clip(0, camera_height - 1),
- pixels[..., 0:1].clip(0, camera_width - 1),
- ),
- axis=-1,
- )
- if(pixel_to_point_dicts):
- points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
- pixels_to_points_robosuite = {}
- for key,value in points_to_pixels_robosuite.items():
- pixels_to_points_robosuite.setdefault(value,[]).append(key)
- return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
- else:
- return pixels
-
- def normalize_depth_image(self,depth_image):
- # Define the minimum and maximum depth values in your depth image
- min_depth = np.min(depth_image)
- max_depth = np.max(depth_image)
-
- # Normalize the depth image to the range [0, 1]
- normalized_depth_image = (depth_image - min_depth) / (max_depth - min_depth)
-
- # Optionally, you can scale the normalized image to a different range
- # For example, to scale it to [0, 255] for visualization:
- normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
- return normalized_depth_image
-
- def convertPointcloudToDepth(self,pointcloud):
- if(type(pointcloud) == str):
- pointcloud_np = np.load(pointcloud)
- else:
- pointcloud_np = pointcloud
- world_to_camera = np.array([[-5.55111512e-17, 2.58174524e-01, -9.66098295e-01,
- 1.60000000e+00],
- [ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 0.00000000e+00],
- [ 0.00000000e+00, -9.66098295e-01, -2.58174524e-01,
- 1.45000000e+00],
- [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
- 1.00000000e+00]])
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(pointcloud_np)
- pcd.transform(np.linalg.inv(world_to_camera))
- pointcloud_np = np.asarray(pcd.points)
-
- depth_data = pointcloud_np[:,2]
-
- fx = self.robosuite_intrinsic_matrix_[0,0]
- fy = self.robosuite_intrinsic_matrix_[1,1]
- cx = self.robosuite_intrinsic_matrix_[0,2]
- cy = self.robosuite_intrinsic_matrix_[1,2]
- u_pixels = ((pointcloud_np[:,0]*fx) / depth_data) + cx
- u_pixels = np.round(u_pixels).astype(int)
- v_pixels = ((pointcloud_np[:,1]*fy) / depth_data) + cy
- v_pixels = np.round(v_pixels).astype(int)
- pixels = np.vstack((u_pixels,v_pixels)).T
- pixels_to_points = {tuple(pixel): tuple(point) for pixel, point in zip(pixels,pointcloud_np)}
- depth_image = np.zeros(self.robosuite_image_shape_)
- for pixel,point in pixels_to_points.items():
- depth_image[pixel[1],pixel[0]] = point[2]
- return depth_image
-
- def simpleInpainting(self,hdf5_msg,gazebo_rgb,gazebo_seg,gazebo_depth):
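-        # Mask-and-paste compositing: keep Gazebo pixels wherever the render shows the
-        # robot, demo pixels elsewhere, save the result, then signal /ready_for_next_input.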
- mask_image = gazebo_seg
- mask_image = cv2.resize(mask_image, (mask_image.shape[1], mask_image.shape[0]))
- gazebo_masked_image = np.zeros_like(gazebo_rgb)
- gazebo_masked_image = cv2.bitwise_and(gazebo_rgb,gazebo_rgb, mask=mask_image)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- hdf5_rgb = self.cv_bridge_.imgmsg_to_cv2(hdf5_msg.rgb)
- inverted_mask_image = cv2.bitwise_not(mask_image)
- hdf5_background = cv2.bitwise_and(hdf5_rgb,hdf5_rgb,mask=inverted_mask_image)
- inpainted_image = gazebo_masked_image + hdf5_background
- image_8bit = cv2.convertScaleAbs(inpainted_image)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- online_input_folder = 'franka_to_franka'
- demo_input_folder = online_input_folder + '/demo_' + str(hdf5_msg.demo_num.data)
- inpaint_file_path = demo_input_folder + '/inpaint' + str(hdf5_msg.traj_num.data) + '.png'
- cv2.imwrite(inpaint_file_path,image_8bit)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-
- def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
- # TODO(kush): Clean this up to use actual data, not file names
- if type(rgb) == str:
- rgb_np = cv2.imread(rgb)
- seg = cv2.imread(seg_file,0)
- depth_np = np.load(depth)
- return
- else:
-            print("Using in-memory message data instead of file paths")
- rgb_np = np.array(rgb,dtype=np.uint8).reshape((seg_file.width,seg_file.height,3))
- depth_np = np.array(depth,dtype=np.float64).reshape((seg_file.width,seg_file.height))
- seg_color = self.cv_bridge_.imgmsg_to_cv2(seg_file)
- seg = cv2.cvtColor(seg_color,cv2.COLOR_BGR2GRAY)
-
- _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
-
- robosuite_depth_image_unmasked = depth_np
- robosuite_rgb_image_unmasked = rgb_np
- cv2.imwrite('/home/benchturtle/original_rgb.png',robosuite_rgb_image_unmasked)
- robosuite_segmentation_mask_255 = seg
- gazebo_robot_only_rgb = gazebo_rgb
- gazebo_segmentation_mask_255 = gazebo_seg
- gazebo_robot_only_depth = gazebo_depth
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
- joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
-
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
- gazebo_robot_only_lab[:,:,0] += 50
- gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
- gazebo_robot_only_rgb = gazebo_robot_only_mod
- attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- if self.offline_:
- if not os.path.exists(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results'):
- os.makedirs(self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results')
-
- last_slash_index = self.seg_file_.rfind('/')
- underscore_before_last_slash_index = self.seg_file_.rfind('_', 0, last_slash_index)
- str_num = self.seg_file_[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
- mask_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/mask' + str_num +'.png'
- attempt_file = self.seg_file_[:self.seg_file_.rfind('/', 0, self.seg_file_.rfind('/')) + 1] + 'results/attempt' + str_num +'.png'
- cv2.imwrite(inpaint_file,image_8bit)
- cv2.imwrite(mask_file,gazebo_segmentation_mask_255)
- cv2.imwrite(attempt_file,attempt)
-
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
- ready_for_next_message = Bool()
- ready_for_next_message.data = True
- self.ready_for_next_input_publisher_.publish(ready_for_next_message)
- return
-            # Unreachable reference path below: it reprojects the gazebo render into
-            # the robosuite camera via transformGazeboImage before compositing.
- transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
- robosuite_rgbd_image_unmasked = np.concatenate((robosuite_rgb_image_unmasked,robosuite_depth_image_unmasked[:,:,np.newaxis]),axis=2)
- inverted_robosuite_segmentation_mask_255 = cv2.bitwise_not(robosuite_segmentation_mask_255)
- robosuite_rgbd_image_masked = cv2.bitwise_and(robosuite_rgbd_image_unmasked,robosuite_rgbd_image_unmasked,mask=inverted_robosuite_segmentation_mask_255)
- robosuite_rgb_image_masked = robosuite_rgbd_image_masked[:,:,0:3].astype(np.uint8)
- robosuite_depth_image_masked = robosuite_rgbd_image_masked[:,:,-1]
- joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],robosuite_depth_image_masked[np.newaxis]),axis=0)
- joined_depth[joined_depth == 0] = 1000
- joined_depth_argmin = np.argmin(joined_depth,axis=0)
- robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
- inverted_joined_depth_argmin = 1 - joined_depth_argmin
- attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
- inpainted_image = attempt + attempt2
- image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
- last_slash_index = seg_file.rfind('/')
- underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
- str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
- inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
-
- cv2.imwrite(inpaint_file,image_8bit)
- inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
- self.inpainted_publisher_.publish(inpainted_image_msg)
-
- def custom_mode_filter(self,image, mask, kernel_size):
- result = np.zeros_like(image)
- pad = kernel_size // 2
- for i in range(pad, image.shape[0] - pad):
- for j in range(pad, image.shape[1] - pad):
- if mask[i, j]:
- neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
- unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
- mode_color = unique[np.argmax(counts)]
- result[i, j] = mode_color
-
- return result
-
- def replace_black_with_surrounding_mode(self,img):
-
- # Convert the image to grayscale
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
- # Threshold the grayscale image to identify black pixels
- _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
-
- # Invert the mask to mark black pixels as True (255) and others as False (0)
- #mask = np.where(thresh == 0, 255, 0)
- mask = thresh
- # Apply custom mode filter to find the mode RGB value of the surrounding pixels
- replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
-
- return replaced_img
-
- def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
- pointcloud = point_cloud2.read_points(pointcloud_msg)
- # Pointcloud is in camera frame
- pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
-
- gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
- _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
- robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
- robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
- robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
- transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
- transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
-        gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))  # zeros mark pixels without a depth sample
- for pixel in pixels_to_points_robosuite.keys():
- points = pixels_to_points_robosuite[pixel]
- points_np = np.array([list(point) for point in points])
- depth_value = np.mean(points_np[:,2])
- gazebo_pixels = []
- for point in points:
- gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
- rgb_values = []
- for gazebo_pixel in gazebo_pixels:
- rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
-
- rgb_values_np = np.array(rgb_values)
- mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
- transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
- gazebo_depth[pixel[0],pixel[1]] = depth_value
- transformed_gazebo_seg[pixel[0],pixel[1]] = 255
- gazebo_depth_after= cv2.dilate(gazebo_depth,(3,3),iterations=3)
- gazebo_depth_after = cv2.erode(gazebo_depth_after,(3,3),iterations=3)
- transformed_gazebo_seg_after= cv2.dilate(transformed_gazebo_seg,(3,3),iterations=3)
- transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,(3,3),iterations=3)
- transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
- transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0)
- #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
- #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
- inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
- transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
- return transformed_gazebo_rgb,gazebo_depth_after
-
- def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
- if(self.is_ready_):
- if(self.camera_intrinsic_matrix_ is None):
- return
- all_pixels,depth_data = self.getPixels(msg)
- mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
- depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
- white_color = (255,255,255)
- i = 0
- block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
- for coord in all_pixels:
- x,y = coord
- mask_image[round(x), round(y)] = 255
- depth_image[round(x), round(y)] = depth_data[i]
- if(block_background_mask[round(x),round(y)]):
- clean_mask_image[round(x),round(y)] = 255
- clean_depth_image[round(x),round(y)] = depth_data[i]
- i += 1
- cv2.imwrite('clean_mask_image.png',clean_mask_image)
- cv2.imwrite('mask_image.png',mask_image)
- cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
- mask_image = clean_mask_image
- depth_image = clean_depth_image
- if(self.original_image_ is not None):
-                # mask_image is already at the camera resolution; no resize needed
- gazebo_masked_image = np.zeros_like(self.original_image_)
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
- cv2.imwrite('original_image.png',self.original_image_)
- cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
- return
- np.save('/home/benchturtle/gazebo_robot_depth.npy',depth_image)
- old_mask_image = mask_image
- mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
- _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
- if(self.original_image_ is not None):
-            # mask_image is already at the camera resolution; no resize needed
- im_floodfill = mask_image.copy()
- (h,w) = mask_image.shape
- mask = np.zeros((h+2,w+2),np.uint8)
- cv2.floodFill(im_floodfill,mask,(0,0),255)
- im_floodfill_inv = cv2.bitwise_not(im_floodfill)
- im_out = mask_image | im_floodfill_inv
- mask_image = im_out
- # Create a new image with the same size as the original image
- gazebo_masked_image = np.zeros_like(self.original_image_)
-
- # Apply the gazebo_mask to the original image using element-wise multiplication
- gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
- gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
- cv2.imwrite('/home/benchturtle/gazebo_robot_only.jpg',gazebo_masked_image)
- cv2.imwrite('/home/benchturtle/gazebo_mask.jpg',mask_image)
- #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
- ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
- self.full_mask_image_publisher_.publish(ros_mask_image)
- self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
-            print("Published full mask image and triggered inpainting")
-
- def setupMesh(self,filename,link_name,rpy_str,xyz_str):
- if(self.is_ready_):
-            # URDF rpy angles are extrinsic roll-pitch-yaw about the fixed x, y, z axes (composed as Rz @ Ry @ Rx)
- mesh_scene = trimesh.load(filename)
- if(link_name == "panda_link6"):
- meshes = mesh_scene.geometry
- for mesh in meshes:
- if(mesh != 'Shell006_000-mesh'):
- vertices = o3d.utility.Vector3dVector(meshes[mesh].vertices)
- triangles = o3d.utility.Vector3iVector(meshes[mesh].faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- open3d_mesh.translate(np.array([0,0,-0.028439417985386614/2]))
- meshes[mesh] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- #meshes['Shell012_000-mesh'] = trimesh.Trimesh(vertices=open3d_mesh.vertices,faces=open3d_mesh.triangles)
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in meshes.values()))
-
- mesh = trimesh.util.concatenate(tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
- for g in mesh_scene.geometry.values()))
- # Convert Trimesh to Open3D TriangleMesh
- vertices = o3d.utility.Vector3dVector(mesh.vertices)
- triangles = o3d.utility.Vector3iVector(mesh.faces)
- open3d_mesh = o3d.geometry.TriangleMesh(vertices, triangles)
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- test_pcd_data = np.asarray(test_pcd.points)
-
- diff = max(np.max(test_pcd_data[:, 0]) - np.min(test_pcd_data[:, 0]),np.max(test_pcd_data[:, 1]) - np.min(test_pcd_data[:, 1]),np.max(test_pcd_data[:, 2]) - np.min(test_pcd_data[:, 2]))
-
-            # Meshes with an extent under 1 are assumed to be in meters and are
-            # scaled to mm here; everything is converted back to meters later.
-            if(diff < 1):
- open3d_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(open3d_mesh.vertices) * 1000)
- else:
- R = np.array([[-1,0,0],[0,0,1],[0,1,0]])
- open3d_mesh.rotate(R,[0,0,0])
- test_pcd = open3d_mesh.sample_points_uniformly(number_of_points=50000)
- #mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100, origin=[0,0,0])
- #o3d.visualization.draw_geometries([test_pcd,mesh_coordinate_frame])
- rpy_str_list = rpy_str.split()
- rpy_floats = [float(x) for x in rpy_str_list]
- rpy_np = np.array(rpy_floats)
- xyz_str_list = xyz_str.split()
- xyz_floats = [float(x) for x in xyz_str_list]
- if(link_name == "panda_link1"):
- xyz_floats = [0, 0, -0.1929999099]
- elif(link_name == "panda_link3"):
- xyz_floats = [0,0,-0.1219998142]
- elif(link_name == "panda_link5"):
- xyz_floats = [0,0,-0.2630000007]
- elif(link_name == "panda_link7"):
- rpy_np = np.array([0,0,-math.pi/4])
- xyz_floats = [0,0,0.03679997502 + (0.028439417985386614/2)]
- xyz_np = 1000 * np.array(xyz_floats)
- R2 = self.eulerToR(rpy_np)
- # Create a 4x4 identity matrix
- transform_matrix = np.eye(4)
-
- # Replace the top-left 3x3 submatrix with the rotation matrix
- transform_matrix[:3, :3] = R2
-
- # Set the first three elements in the fourth column to the translation vector
- transform_matrix[:3, 3] = xyz_np
-
- # Set the bottom-right element to 1
- transform_matrix[3, 3] = 1.0
- open3d_mesh.transform(transform_matrix)
-
- transform = self.fks_[link_name]
-
- position = Vector3()
- quaternion = Quaternion()
- position.x = transform.pos[0]
- position.y = transform.pos[1]
- position.z = transform.pos[2]
-
- quaternion.w = transform.rot[0]
- quaternion.x = transform.rot[1]
- quaternion.y = transform.rot[2]
- quaternion.z = transform.rot[3]
-
- robot_fk = self.transformStampedToMatrix(quaternion,position)
- t_matrix = self.camera_to_world_ @ robot_fk
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
-
-        # Fallback TF-based lookup kept for reference; it requires the camera frame to be present in the Gazebo scene
- while True:
- try:
- t = self.tf_buffer_.lookup_transform(
- "camera_color_optical_frame",
- link_name,
- rclpy.time.Time(),
- )
- t_matrix = self.transformStampedToMatrix(t.transform.rotation,t.transform.translation)
- open3d_mesh.transform(t_matrix)
- return open3d_mesh
- except TransformException as ex:
- #TODO Change This
- pass
-
- def transformStampedToMatrix(self,rotation,translation):
- if(self.is_ready_):
- q0 = rotation.w
- q1 = rotation.x
- q2 = rotation.y
- q3 = rotation.z
- # First row of the rotation matrix
- r00 = 2 * (q0 * q0 + q1 * q1) - 1
- r01 = 2 * (q1 * q2 - q0 * q3)
- r02 = 2 * (q1 * q3 + q0 * q2)
-
- # Second row of the rotation matrix
- r10 = 2 * (q1 * q2 + q0 * q3)
- r11 = 2 * (q0 * q0 + q2 * q2) - 1
- r12 = 2 * (q2 * q3 - q0 * q1)
-
- # Third row of the rotation matrix
- r20 = 2 * (q1 * q3 - q0 * q2)
- r21 = 2 * (q2 * q3 + q0 * q1)
- r22 = 2 * (q0 * q0 + q3 * q3) - 1
- t_matrix = np.array(
- [[r00, r01, r02,translation.x * 1000],
- [r10, r11, r12,translation.y * 1000],
- [r20, r21, r22,translation.z * 1000],
- [0,0,0,1]]
- )
- return t_matrix
-
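-    # Sanity check for the quaternion-to-rotation formula above (illustrative
-    # only): for the identity quaternion (w, x, y, z) = (1, 0, 0, 0) all cross
-    # terms vanish and each diagonal entry becomes 2 * w^2 - 1 = 1, giving the
-    # identity matrix. The translation column is scaled by 1000 because the
-    # meshes in this node are handled in millimeters.
-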
- def setupMeshes(self,rgb,depth,segmentation):
- if(self.is_ready_):
- open3d_mesh = None
- for [filename,link_name,rpy_str,xyz_str] in self.links_info_:
- if open3d_mesh is None:
- open3d_mesh = self.setupMesh(filename,link_name,rpy_str,xyz_str)
- else:
- open3d_mesh += self.setupMesh(filename,link_name,rpy_str,xyz_str)
- pcd = open3d_mesh.sample_points_uniformly(number_of_points=200000)
- pcd.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) / 1000)
- pcd_data = np.asarray(pcd.points)
- np.save('panda_gazebo_pointcloud.npy',pcd_data)
- point_cloud_msg = PointCloud2()
- point_cloud_msg.header = Header()
- point_cloud_msg.header.frame_id = "real_camera_link"
- fields =[PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- point_cloud_msg.height = 1
- point_cloud_msg.width = len(pcd_data)
- point_cloud_msg.fields = fields
- point_cloud_msg.is_bigendian = False
- point_cloud_msg.point_step = 3 * 4
- point_cloud_msg.row_step = point_cloud_msg.point_step * len(pcd_data)
- point_cloud_msg.is_dense = True
- point_cloud_msg.data = bytearray(pcd_data.astype('float32').tobytes())
- self.full_publisher_.publish(point_cloud_msg)
- self.fullPointcloudCallback(point_cloud_msg,rgb,depth,segmentation)
-
- def eulerToR(self,rpy_np):
- if(self.is_ready_):
- rotation_x = rpy_np[0]
- rotation_y = rpy_np[1]
- rotation_z = rpy_np[2]
- Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
- Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
- Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
- R = np.matmul(Rz,Ry)
- R = np.matmul(R,Rx)
- return R
-
- def listenerCallback(self,msg):
- if self.is_ready_:
- self.listenerCallbackOnlineDebug(msg)
- return
-
- def listenerCallbackOnlineDebug(self,msg):
- self.i_ += 1
- online_input_folder = 'franka_to_franka'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- demo_input_folder = online_input_folder + '/demo_' + str(msg.demo_num.data)
- if not os.path.exists(demo_input_folder):
- os.makedirs(demo_input_folder)
- qout_msg = Float64MultiArray()
- qout_msg.data = np.array(msg.joints).tolist()
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.robosuite_rgb_ = msg.rgb
- self.robosuite_qout_list_ = qout_msg.data
- self.hdf5_msg_ = msg
-
- # def listenerCallback(self,msg):
- # if self.is_ready_:
- # self.offline_ = True
- # start_time = time.time()
- # # path_array = msg.rgb.split('/')
- # # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
- # # if not os.path.exists(online_input_folder):
- # # os.makedirs(online_input_folder)
- # # online_input_num_folder = online_input_folder + '/' + path_array[-2]
- # # if not os.path.exists(online_input_num_folder):
- # # os.makedirs(online_input_num_folder)
- # # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # rgb_image = cv2.imread(msg.rgb)
- # # cv2.imwrite(input_rgb_path,rgb_image)
- # rgb_array = rgb_image.flatten().tolist()
- # depth_image = np.load(msg.depth)
- # # path_array = msg.depth.split('/')
- # # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # # np.save(input_depth_path,depth_image)
- # depth_array = depth_image.flatten().tolist()
- # self.seg_file_ = msg.segmentation
- # segmentation_image = cv2.imread(msg.segmentation)
- # # path_array = msg.segmentation.split('/')
- # # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # # cv2.imwrite(input_segmentation_path,segmentation_image)
- # segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
- # joint_array = np.load(msg.joints).tolist()
- # # path_array = msg.joints.split('/')
- # # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
- # # np.save(input_joints_path,np.array(joint_array))
- # input_file_robosuite_data_msg = InputFilesRobosuiteData()
- # input_file_robosuite_data_msg.rgb = rgb_array
- # input_file_robosuite_data_msg.depth_map = depth_array
- # input_file_robosuite_data_msg.segmentation = segmentation_ros_image
- # input_file_robosuite_data_msg.joints = joint_array
- # self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
- # return
- # rgb = msg.rgb
- # depth = msg.depth
- # segmentation = msg.segmentation
- # joints = msg.joints
- # joint_array = np.load(joints)
- # gripper = joint_array[-1]
- # joint_array = joint_array[:-1]
-
- # ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- # scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- # scipy_quaternion = scipy_rotation.as_quat()
- # qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- # self.q_init_ = qout
- # # Hardcoded gripper
- # qout_list = qout.tolist()
- # print(gripper)
- # panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- # qout_list.append(panda_gripper_command)
- # qout_msg = Float64MultiArray()
- # qout_msg.data = qout_list
- # self.panda_joint_command_publisher_.publish(qout_msg)
- # self.joint_commands_callback(qout_msg)
- # self.setupMeshes(rgb,depth,segmentation)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
- # def listenerCallbackOnlineDebug(self,msg):
- # self.i_ += 1
- # online_input_folder = 'offline_ur5e_input'
- # if not os.path.exists(online_input_folder):
- # os.makedirs(online_input_folder)
- # online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- # if not os.path.exists(online_input_num_folder):
- # os.makedirs(online_input_num_folder)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- # cv2.imwrite(online_input_num_folder+'/rgb.png',rgb_np)
- # depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.segmentation.width,msg.segmentation.height))
- # np.save(online_input_num_folder+'/depth.npy',depth_np)
- # seg_color = self.cv_bridge_.imgmsg_to_cv2(msg.segmentation)
- # seg = cv2.cvtColor(seg_color,cv2.COLOR_BGR2GRAY)
- # cv2.imwrite(online_input_num_folder+'/seg.png',seg)
- # np.save(online_input_num_folder+'/joints.npy',np.array(msg.joints))
- # rgb = msg.rgb
- # depth = msg.depth_map
- # segmentation = msg.segmentation
- # joints = msg.joints
- # joint_array = np.array(joints)
- # gripper = joint_array[-1]
- # joint_array = joint_array[:-1]
-
- # ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- # scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- # scipy_quaternion = scipy_rotation.as_quat()
- # qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
-
- # if qout is not None:
- # self.q_init_ = qout
- # # Hardcoded gripper
- # qout_list = qout.tolist()
-
- # panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- # qout_list.append(panda_gripper_command)
- # qout_msg = Float64MultiArray()
- # qout_msg.data = qout_list
- # self.panda_joint_command_publisher_.publish(qout_msg)
- # self.joint_commands_callback(qout_msg)
- # self.robosuite_rgb_ = msg.rgb
- # self.robosuite_depth_ = msg.depth_map
- # self.robosuite_seg_ = msg.segmentation
- # self.robosuite_qout_list_ = qout_list
- # else:
- # print("WARNING HIT IK ON FRANKA ERROR")
-
-
- def listenerCallbackData(self,msg):
- if self.is_ready_:
- start_time = time.time()
- segmentation_data = msg.segmentation
-
- segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
-
-            rgb = msg.rgb
-            rgb = np.array(rgb).reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
-
-            depth_map = msg.depth_map
-            depth_map = np.array(depth_map).reshape(segmentation_data.shape)
-
- joint_array = msg.joints
- gripper = joint_array[-1]
- joint_array = joint_array[:-1]
-
- ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- self.q_init_ = qout
-
- # Hardcoded gripper
- qout_list = qout.tolist()
- panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
- qout_list.append(panda_gripper_command)
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.panda_joint_command_publisher_.publish(qout_msg)
- self.joint_commands_callback(qout_msg)
- self.setupMeshes(rgb, depth_map, segmentation_data)
- # end_time = time.time()
- # print("Total time: " + str(end_time - start_time) + " s")
-
-
- def joint_commands_callback(self, msg):
- self.joint_state_msg.header.stamp = self.get_clock().now().to_msg()
- #self.joint_state_msg.name = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint"] # Replace with your joint names
- self.joint_state_msg.name = ["panda_joint1", "panda_joint2", "panda_joint3","panda_joint4", "panda_joint5", "panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"]
-        msg.data.append(msg.data[-1])  # mirror the last (gripper) command so both finger joints receive the same value
- #msg.data.append(gripper_val)
- #msg.data.append(gripper_val)
- # for i in range(5):
- # if(i == 1 or i == 4):
- # msg.data.append(gripper_val)
- # else:
- # msg.data.append(gripper_val)
- self.joint_state_msg.position = msg.data
- self.thetas_ = {key:value for key,value in zip(self.joint_state_msg.name,msg.data)}
- self.fks_ = self.chain_.forward_kinematics(self.thetas_)
- self.panda_joint_state_publisher_.publish(self.joint_state_msg)
-
-
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- write_data = WriteData()
-
- rclpy.spin(write_data)
-
- # Destroy the node explicitly
- # (optional - otherwise it will be done automatically
- # when the garbage collector destroys the node object)
- write_data.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_multi_real_better_early_inpaint.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_multi_real_better_early_inpaint.py
new file mode 100644
index 0000000..0688f7e
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_multi_real_better_early_inpaint.py
@@ -0,0 +1,1014 @@
+#!/usr/bin/env python3
+
+import rclpy
+from rclpy.node import Node
+import trimesh
+from std_msgs.msg import Header, Float64MultiArray,Bool
+from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
+import numpy as np
+from launch_ros.substitutions import FindPackageShare
+from launch.substitutions import PathJoinSubstitution
+import os
+import glob
+import subprocess
+import xml.etree.ElementTree as ET
+from functools import partial
+import math
+from message_filters import TimeSynchronizer, Subscriber
+from tf2_ros import TransformException
+from tf2_ros.buffer import Buffer
+from tf2_ros.transform_listener import TransformListener
+from sensor_msgs_py import point_cloud2
+import cv2
+from cv_bridge import CvBridge
+import time
+from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
+from sensor_msgs.msg import JointState
+from tracikpy import TracIKSolver
+from mdh.kinematic_chain import KinematicChain
+from mdh import UnReachable
+from geometry_msgs.msg import Vector3, Quaternion
+from scipy.spatial.transform import Rotation as R
+from message_filters import ApproximateTimeSynchronizer, Subscriber
+import pathlib
+
+
+class WriteData(Node):
+ def __init__(self):
+ super().__init__('write_data_node')
+ self.is_ready_ = False
+ self.thetas_ = None
+ self.debug_ = True
+ self.float_image_ = False
+ file_directory = pathlib.Path(__file__).parent.resolve()
+ self.panda_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
+ self.panda_solver_ = TracIKSolver(self.panda_urdf_,"panda_link0","panda_ee")
+ self.ur5_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/ur5_ik_real.urdf')
+ self.ur5_solver_ = TracIKSolver(self.ur5_urdf_,"base_link","ur5_ee_gripper")
+
+ # real_camera_link to world and then multiply translation by 1000
+ # self.camera_to_world_ = np.array([[0,1,0,0],
+ # [0.258,0,-0.966,989],
+ # [-0.966,0,-0.258,1919],
+ # [0,0,0,1]])
+ self.camera_to_world_ = np.array([[0.000, 1.000, 0.000 , -1],
+ [0.706, 0.000 ,-0.708, 603],
+ [-0.708, 0.000, -0.706 , 1307],
+ [0,0,0,1]])
+
+ self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
+ [ 0. , 101.39696962, 42. ],
+ [ 0. , 0. , 1. ]])
+ self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
+ self.panda_joint_state_publisher_ = self.create_publisher(
+ JointState,
+ '/joint_states', # Topic name for /joint_states
+ 1)
+ self.joint_state_subscriber_ = Subscriber(self,JointState,'/gazebo_joint_states')
+ self.ur5_rgb_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/image_raw')
+ self.ur5_depth_image_subscriber_ = Subscriber(self,Image,'/ur5_camera/depth/image_raw')
+ self.panda_rgb_image_subscriber_ = Subscriber(self,Image,'/panda_camera/image_raw')
+ self.panda_depth_image_subscriber_ = Subscriber(self,Image,'/panda_camera/depth/image_raw')
+ self.subscription_ = self.create_subscription(
+ InputFilesRealDataMulti,
+ '/input_files',
+ self.listenerCallback,
+ 1)
+ self.subscription_data_ = self.create_subscription(
+ InputFilesRealDataMulti,
+ 'input_files_data_real',
+ self.listenerCallbackOnlineDebug,
+ 1)
+ self.joint_state_msg = JointState()
+
+        # Hardcoded start position
+ self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
+
+ self.publishers_ = []
+ self.subscribers_ = []
+ self.timers_ = []
+ self.left_real_rgb_ = None
+ self.right_real_rgb_ = None
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
+ self.real_seg_ = None
+ self.real_qout_list_ = None
+ self.i_ = -1
+ self.offline_ = False
+ self.seg_file_ = None
+ self.tf_buffer_ = Buffer()
+ self.tf_listener_ = TransformListener(self.tf_buffer_, self)
+ self.camera_intrinsic_matrix_ = None
+ self.image_shape_ = None
+        # NOTE: hardcoded; keep in sync with the robosuite camera resolution
+ self.robosuite_image_shape_ = (84,84)
+ self.camera_intrinsic_subscription_ = self.create_subscription(
+ CameraInfo,
+ '/camera/camera_info',
+ self.cameraInfoCallback,
+ 1
+ )
+ self.camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/camera/image_raw',
+ self.cameraCallback,
+ 1
+ )
+
+ self.ur5_left_rgb_ = None
+ self.ur5_left_depth_ = None
+ self.panda_left_rgb_ = None
+ self.panda_left_depth_ = None
+ self.ur5_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/image_raw',
+ self.ur5LeftRgbCallback,
+ 1
+ )
+
+ self.ur5_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_left_camera/depth/image_raw',
+ self.ur5LeftDepthCallback,
+ 1
+ )
+
+ self.panda_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_left_camera/image_raw',
+ self.pandaLeftRgbCallback,
+ 1
+ )
+
+ self.panda_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_left_camera/depth/image_raw',
+ self.pandaLeftDepthCallback,
+ 1
+ )
+
+ self.ur5_right_rgb_ = None
+ self.ur5_right_depth_ = None
+ self.panda_right_rgb_ = None
+ self.panda_right_depth_ = None
+ self.ur5_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/image_raw',
+ self.ur5RightRgbCallback,
+ 1
+ )
+
+ self.ur5_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/depth/image_raw',
+ self.ur5RightDepthCallback,
+ 1
+ )
+
+ self.panda_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/image_raw',
+ self.pandaRightRgbCallback,
+ 1
+ )
+
+ self.panda_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/depth/image_raw',
+ self.pandaRightDepthCallback,
+ 1
+ )
+
+ self.joint_subscription_ = self.create_subscription(
+ JointState,
+ '/gazebo_joint_states',
+ self.noTimeGazeboCallback,
+ 1
+ )
+
+ self.ur5_left_no_gripper_rgb_ = None
+ self.ur5_left_no_gripper_depth_ = None
+ self.ur5_right_no_gripper_rgb_ = None
+ self.ur5_right_no_gripper_depth_ = None
+ self.ur5_no_gripper_left_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_no_gripper_left_camera/image_raw',
+ self.ur5LeftNoGripperRgbCallback,
+ 1
+ )
+
+ self.ur5_no_gripper_left_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_no_gripper_left_camera/depth/image_raw',
+ self.ur5LeftNoGripperDepthCallback,
+ 1
+ )
+
+ self.ur5_no_gripper_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_no_gripper_right_camera/image_raw',
+ self.ur5RightNoGripperRgbCallback,
+ 1
+ )
+
+        self.ur5_no_gripper_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_no_gripper_right_camera/depth/image_raw',
+ self.ur5RightNoGripperDepthCallback,
+ 1
+ )
+
+ self.cv_bridge_ = CvBridge()
+ self.mask_image_publisher_ = self.create_publisher(Image,"mask_image",1)
+ self.ready_for_next_input_publisher_ = self.create_publisher(Bool,"/ready_for_next_input",1)
+
+ self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
+ self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
+ #self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
+ self.updated_joints_ = False
+ self.is_ready_ = True
+
+ def ur5LeftRgbCallback(self,msg):
+ self.ur5_left_rgb_ = msg
+
+ def ur5LeftDepthCallback(self,msg):
+ self.ur5_left_depth_ = msg
+
+ def pandaLeftRgbCallback(self,msg):
+ self.panda_left_rgb_ = msg
+
+ def pandaLeftDepthCallback(self,msg):
+ self.panda_left_depth_ = msg
+
+ def ur5RightRgbCallback(self,msg):
+ self.ur5_right_rgb_ = msg
+
+ def ur5RightDepthCallback(self,msg):
+ self.ur5_right_depth_ = msg
+
+ def pandaRightRgbCallback(self,msg):
+ self.panda_right_rgb_ = msg
+
+ def pandaRightDepthCallback(self,msg):
+ self.panda_right_depth_ = msg
+
+ def ur5LeftNoGripperRgbCallback(self,msg):
+ self.ur5_left_no_gripper_rgb_ = msg
+
+ def ur5LeftNoGripperDepthCallback(self,msg):
+ self.ur5_left_no_gripper_depth_ = msg
+
+ def ur5RightNoGripperRgbCallback(self,msg):
+ self.ur5_right_no_gripper_rgb_ = msg
+
+ def ur5RightNoGripperDepthCallback(self,msg):
+ self.ur5_right_no_gripper_depth_ = msg
+
+ def noTimeGazeboCallback(self,joint_msg):
+ start_time = time.time()
+ left_inpainted_image, left_mask = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_,self.ur5_left_no_gripper_depth_,self.left_real_rgb_,self.left_real_depth_)
+ right_inpainted_image, right_mask = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_,self.ur5_right_no_gripper_depth_,self.right_real_rgb_,self.right_real_depth_)
+ inpainted_image_msg = MultipleInpaintImages()
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(left_mask),self.cv_bridge_.cv2_to_imgmsg(right_mask)]
+ mask_image_msg.images = []
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('left_inpainting'):
+ os.makedirs('left_inpainting')
+ if not os.path.exists('right_inpainting'):
+ os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
+ cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
+ cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
+
+    # Note: at the call sites in noTimeGazeboCallback, the "real_*" parameters
+    # receive the UR5 Gazebo renders, the "gazebo_*" parameters the Panda
+    # renders, and the "world_*" parameters the physical camera images.
+    def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,gazebo_no_gripper_depth,world_rgb,world_depth):
+ real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
+ real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
+ gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
+
+ gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
+ cv2.imwrite('real_rgb.png',real_rgb_np)
+ cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
+ cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
+ cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+ gazebo_no_gripper_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_no_gripper_depth)
+
+        # Pixels closer than the depth-8 cutoff are taken to be robot; the rest of the render sits at the far plane
+        real_seg_np = (real_depth_np < 8).astype(np.uint8)
+ real_seg_255_np = 255 * real_seg_np
+ gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
+ gazebo_seg_255_np = 255 * gazebo_seg_np
+ cv2.imwrite('real_seg.png',real_seg_255_np)
+ cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
+ real_no_gripper_seg_255_np = (gazebo_no_gripper_depth_np < 8).astype(np.uint8)
+ real_no_gripper_seg_255_np = 255 * real_no_gripper_seg_255_np
+ real_rgb = world_rgb
+ real_depth = world_depth
+ real_seg = real_seg_255_np
+ gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
+ return self.dummyInpaintingEarly(real_rgb,real_seg,real_no_gripper_seg_255_np,gazebo_rgb_np,gazebo_seg_255_np)
+
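+    # Illustrative sketch (a hypothetical helper, not called anywhere): how the
+    # binary robot masks in doFullInpainting are derived from depth, assuming
+    # the background renders beyond the depth-8 cutoff used above.
+    @staticmethod
+    def _demo_depth_to_mask():
+        depth = np.array([[0.5, 2.0, 12.0],
+                          [7.9, 8.0, 9.5]])
+        seg = (depth < 8).astype(np.uint8)  # 1 where the render is closer than the cutoff
+        seg_255 = 255 * seg                 # 0/255 mask, the form cv2 mask arguments expect
+        return seg_255                      # [[255, 255, 0], [255, 0, 0]]
+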
+ def cameraInfoCallback(self,msg):
+ if(self.is_ready_):
+ self.camera_intrinsic_matrix_ = np.array([[msg.k[0],msg.k[1],msg.k[2],0],[msg.k[3],msg.k[4],msg.k[5],0],[msg.k[6],msg.k[7],msg.k[8],0]])
+ self.image_shape_ = (msg.height,msg.width,3)
+
+ def cameraCallback(self,msg):
+ if(self.is_ready_):
+ self.original_image_ = self.cv_bridge_.imgmsg_to_cv2(msg,desired_encoding="passthrough")
+ #cv2.imwrite('gazebo_rgb.png',self.original_image_)
+
+ def getPixels(self,msg):
+ if(self.is_ready_):
+ msg_data = point_cloud2.read_points(msg)
+ msg_data = np.array([[item.tolist() for item in row] for row in msg_data])
+ depth_data = msg_data[:,2]
+ camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
+            # project_points_from_world_to_camera expects (camera_height, camera_width); image_shape_ is (height, width, 3)
+            pixel_data = self.project_points_from_world_to_camera(msg_data,camera_transform,self.image_shape_[0],self.image_shape_[1])
+ return pixel_data, depth_data
+
+ def project_points_from_world_to_camera(self,points, world_to_camera_transform, camera_height, camera_width,pixel_to_point_dicts=False):
+ """
+ Helper function to project a batch of points in the world frame
+ into camera pixels using the world to camera transformation.
+
+ Args:
+ points (np.array): 3D points in world frame to project onto camera pixel locations. Should
+ be shape [..., 3].
+            world_to_camera_transform (np.array): 4x4 matrix mapping homogeneous world
+                coordinates to camera pixel coordinates.
+ camera_height (int): height of the camera image
+ camera_width (int): width of the camera image
+
+ Return:
+ pixels (np.array): projected pixel indices of shape [..., 2]
+ """
+ assert points.shape[-1] == 3 # last dimension must be 3D
+ assert len(world_to_camera_transform.shape) == 2
+ assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4
+
+        # convert points to homogeneous coordinates -> (px, py, pz, 1)
+ ones_pad = np.ones(points.shape[:-1] + (1,))
+ points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]
+
+ # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform
+ mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]
+ cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]
+ pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]
+
+        # re-scaling from homogeneous coordinates to recover pixel values
+ # (x, y, z) -> (x / z, y / z)
+ pixels = pixels / pixels[..., 2:3]
+ pixels = pixels[..., :2].round().astype(int) # shape [..., 2]
+ # swap first and second coordinates to get pixel indices that correspond to (height, width)
+ # and also clip pixels that are out of range of the camera image
+ pixels = np.concatenate(
+ (
+ pixels[..., 1:2].clip(0, camera_height - 1),
+ pixels[..., 0:1].clip(0, camera_width - 1),
+ ),
+ axis=-1,
+ )
+ if(pixel_to_point_dicts):
+ points_to_pixels_robosuite = {tuple(arr_4): tuple(arr_2) for arr_4, arr_2 in zip(points,pixels)}
+ pixels_to_points_robosuite = {}
+ for key,value in points_to_pixels_robosuite.items():
+ pixels_to_points_robosuite.setdefault(value,[]).append(key)
+ return pixels,points_to_pixels_robosuite,pixels_to_points_robosuite
+ else:
+ return pixels
+
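+    # Illustrative sketch of the projection math above (a hypothetical helper,
+    # not called anywhere): a made-up 4x4 intrinsic-style transform projects one
+    # camera-frame point and yields the clipped (row, col) pixel, mirroring the
+    # steps in project_points_from_world_to_camera.
+    @staticmethod
+    def _demo_project_single_point():
+        cam = np.array([[100.0,   0.0, 64.0, 0.0],
+                        [  0.0, 100.0, 64.0, 0.0],
+                        [  0.0,   0.0,  1.0, 0.0],
+                        [  0.0,   0.0,  0.0, 1.0]])
+        point = np.array([0.2, -0.1, 0.5, 1.0])  # homogeneous camera-frame point
+        proj = cam @ point                       # (u * z, v * z, z, 1)
+        proj = proj / proj[2]                    # perspective divide
+        u, v = int(round(proj[0])), int(round(proj[1]))
+        height, width = 128, 128
+        row = min(max(v, 0), height - 1)         # clip like the method above
+        col = min(max(u, 0), width - 1)
+        return row, col                          # (44, 104)
+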
+    def normalize_depth_image(self,depth_image):
+        # Min-max normalize the depth image to [0, 255] for visualization
+        min_depth = np.min(depth_image)
+        max_depth = np.max(depth_image)
+        depth_range = max_depth - min_depth
+        if depth_range == 0:
+            # A constant depth image would otherwise cause a divide-by-zero
+            return np.zeros_like(depth_image, dtype=np.uint8)
+        normalized_depth_image = (depth_image - min_depth) / depth_range
+        normalized_depth_image = (normalized_depth_image * 255).astype(np.uint8)
+        return normalized_depth_image
+
+ def dummyInpaintingEarly(self,rgb,seg_file,seg_file_no_gripper,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file_no_gripper = cv2.threshold(seg_file_no_gripper, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('seg_file.png',seg_file)
+ cv2.imwrite('seg_file_no_gripper.png',seg_file_no_gripper)
+        # cv2.absdiff avoids the uint8 wrap-around that plain subtraction of 0/255 masks would cause
+        seg_file_gripper = cv2.absdiff(seg_file, seg_file_no_gripper)
+ seg_file_gripper = cv2.erode(seg_file_gripper,np.ones((3,3),np.uint8),iterations=3)
+ seg_file_gripper = cv2.dilate(seg_file_gripper,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('seg_file_gripper.png',seg_file_gripper)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file_no_gripper)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=30)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file_gripper
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
+
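+    # Minimal sketch of the mask compositing used in dummyInpaintingEarly (a
+    # hypothetical helper, not called anywhere): robot pixels come from the
+    # Gazebo render and everything else from the background, via complementary
+    # cv2 masks.
+    @staticmethod
+    def _demo_mask_composite():
+        background = np.full((4, 4, 3), 200, dtype=np.uint8)
+        robot = np.full((4, 4, 3), 50, dtype=np.uint8)
+        mask = np.zeros((4, 4), dtype=np.uint8)
+        mask[1:3, 1:3] = 255  # robot occupies the center patch
+        robot_only = cv2.bitwise_and(robot, robot, mask=mask)
+        background_only = cv2.bitwise_and(background, background, mask=cv2.bitwise_not(mask))
+        return robot_only + background_only  # 50 in the center, 200 elsewhere
+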
+ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
+        rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+        if(rgb.dtype == np.float32):
+            rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(gazebo_seg)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+        outline_mask = cv2.absdiff(inverted_segmentation_mask_255, inverted_segmentation_mask_255_original)  # currently unused
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ #cv2.imwrite('inverted_seg_file_original.png',inverted_seg_file_original)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ #cv2.imwrite('inverted_seg_file.png',inverted_seg_file)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=inverted_seg_file)
+ #cv2.imwrite('background_only.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ #cv2.imwrite('no_fill.png',inpainted_image)
+
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ cv2_inpaint_image = cv2.inpaint(inpainted_image,better_dilated_blend_mask,3,cv2.INPAINT_TELEA)
+
+ target_color = (39,43,44)
+ target_mask = np.zeros_like(inpainted_image)
+ target_mask[:,:] = target_color
+ inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ # inpainted_image = cv2_inpaint_image
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('inpainting'):
+ os.makedirs('inpainting')
+ if not os.path.exists('mask'):
+ os.makedirs('mask')
+ cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
+ cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
+        # float inputs were converted to uint8 above, so publishing as bgr8 below is safe
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image,encoding="bgr8")
+ mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.mask_image_publisher_.publish(mask_image_msg)
+
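+    # Illustrative sketch (a hypothetical helper, not called anywhere)
+    # contrasting the two blend-region fills in dummyInpainting above:
+    # cv2.inpaint with INPAINT_TELEA diffuses surrounding colors into the masked
+    # region, while np.where paints it a constant color; the method currently
+    # publishes the constant-color variant.
+    @staticmethod
+    def _demo_blend_region_fill(image, blend_mask):
+        telea = cv2.inpaint(image, blend_mask, 3, cv2.INPAINT_TELEA)
+        target = np.zeros_like(image)
+        target[:, :] = (39, 43, 44)  # the hardcoded background color above
+        constant = np.where(blend_mask[:, :, None] != 0, target, image).astype(np.uint8)
+        return telea, constant
+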
+ def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth):
+        # Disabled: this depth-aware compositing path is kept for reference only
+        # (the dummy inpainting methods above are used instead).
+        return
+ if type(rgb) == str:
+ rgb_np = cv2.imread(rgb)
+ seg = cv2.imread(seg_file,0)
+ depth_np = np.load(depth)
+ return
+ else:
+ rgb_np = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ depth_np = depth
+ seg = seg_file
+ _, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_BINARY)
+ #rgb_np = cv2.resize(rgb_np,(128,128))
+ #depth_np = cv2.resize(depth_np,(128,128))
+ #seg = cv2.resize(seg,(128,128))
+ #gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ #gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ #gazebo_depth = cv2.resize(gazebo_depth,(128,128))
+ real_depth_image_unmasked = depth_np
+ real_rgb_image_unmasked = rgb_np
+ real_segmentation_mask_255 = seg
+ gazebo_robot_only_rgb = gazebo_rgb
+ gazebo_segmentation_mask_255 = gazebo_seg
+ gazebo_robot_only_depth = gazebo_depth
+ real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(real_segmentation_mask_255)
+ inverted_real_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=10)
+ cv2.imwrite('inverted_real_segmentation_mask_255.png',inverted_real_segmentation_mask_255)
+ outline_mask = abs(inverted_real_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
+ real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
+ real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
+
+ # real_rgb_image_masked = cv2.inpaint(real_rgb_image_masked,outline_mask,3,cv2.INPAINT_TELEA)
+ real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
+ joined_depth = np.concatenate((gazebo_robot_only_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
+ joined_depth[0,:,:][joined_depth[0,:,:] == 0] = 1000
+ joined_depth[1,:,:][joined_depth[1,:,:] == 0] = 5
+ joined_depth_argmin = np.argmin(joined_depth,axis=0)
+
+ real_rgb_image_masked_inpaint = real_rgb_image_masked
+
+
+ attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
+ inverted_joined_depth_argmin = 1 - joined_depth_argmin
+ gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB)
+ gazebo_robot_only_lab[:,:,0] += 100
+ gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ gazebo_robot_only_rgb = gazebo_robot_only_mod
+ attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
+
+ inpainted_image = cv2.add(attempt,attempt2)#attempt + attempt2
+
+ better_blend_mask = joined_depth_argmin * real_segmentation_mask_255
+ better_dilated_blend_mask = joined_depth_argmin * cv2.bitwise_not(inverted_real_segmentation_mask_255)
+ target_color = (39,43,44)
+ target_mask = np.zeros_like(inpainted_image)
+ target_mask[:,:] = target_color
+ got_away = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ #image_8bit = cv2.inpaint(inpainted_image.astype(np.uint8),better_blend_mask.astype(np.uint8),3,cv2.INPAINT_TELEA)
+ image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
+ image_8bit = got_away
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('inpainting'):
+ os.makedirs('inpainting')
+ if not os.path.exists('mask'):
+ os.makedirs('mask')
+ cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',image_8bit)
+ cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_blend_mask.astype(np.uint8))
+ # cv2.imwrite('small_inpaint.png',cv2.resize(image_8bit,(128,128)))
+
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
+ mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_blend_mask.astype(np.uint8),encoding="mono8")
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.mask_image_publisher_.publish(mask_image_msg)
+ ready_for_next_message = Bool()
+ ready_for_next_message.data = True
+ self.ready_for_next_input_publisher_.publish(ready_for_next_message)
+ return
+
+ transformed_gazebo_rgb,transformed_gazebo_depth = self.transformGazeboImage(gazebo_robot_only_rgb,gazebo_segmentation_mask_255,pointcloud_msg)
+ real_rgbd_image_unmasked = np.concatenate((real_rgb_image_unmasked,real_depth_image_unmasked[:,:,np.newaxis]),axis=2)
+ inverted_real_segmentation_mask_255 = cv2.bitwise_not(real_segmentation_mask_255)
+ real_rgbd_image_masked = cv2.bitwise_and(real_rgbd_image_unmasked,real_rgbd_image_unmasked,mask=inverted_real_segmentation_mask_255)
+ real_rgb_image_masked = real_rgbd_image_masked[:,:,0:3].astype(np.uint8)
+ real_depth_image_masked = real_rgbd_image_masked[:,:,-1]
+ joined_depth = np.concatenate((transformed_gazebo_depth[np.newaxis],real_depth_image_masked[np.newaxis]),axis=0)
+ joined_depth[joined_depth == 0] = 1000
+ joined_depth_argmin = np.argmin(joined_depth,axis=0)
+ real_rgb_image_masked_inpaint = cv2.inpaint(real_rgb_image_masked,real_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA)
+ attempt = real_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis]
+ inverted_joined_depth_argmin = 1 - joined_depth_argmin
+ attempt2 = transformed_gazebo_rgb * inverted_joined_depth_argmin[:,:,np.newaxis]
+ inpainted_image = attempt + attempt2
+ image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image
+ last_slash_index = seg_file.rfind('/')
+ underscore_before_last_slash_index = seg_file.rfind('_', 0, last_slash_index)
+ str_num = seg_file[underscore_before_last_slash_index + 1:last_slash_index]
+ inpaint_file = seg_file[:seg_file.rfind('/', 0, seg_file.rfind('/')) + 1] + 'results/inpaint' + str_num +'.png'
+
+ cv2.imwrite(inpaint_file,image_8bit)
+ inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(image_8bit,encoding="bgr8")
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+
+ def custom_mode_filter(self,image, mask, kernel_size):
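+        # For each masked pixel, take the most frequent color (mode over BGR
+        # triples) in its kernel_size x kernel_size neighborhood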
+ result = np.zeros_like(image)
+ pad = kernel_size // 2
+ for i in range(pad, image.shape[0] - pad):
+ for j in range(pad, image.shape[1] - pad):
+ if mask[i, j]:
+ neighborhood = image[i - pad: i + pad + 1, j - pad: j + pad + 1]
+ unique, counts = np.unique(neighborhood.reshape(-1, 3), axis=0, return_counts=True)
+ mode_color = unique[np.argmax(counts)]
+ result[i, j] = mode_color
+
+ return result
+
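+    # A hedged alternative sketch (not called anywhere): the Python loop above is
+    # O(H*W*k^2). Assuming uint8 BGR input, a channel-wise median filter
+    # approximates the neighborhood mode for small kernels and runs vectorized.
+    def fast_mode_fill_approx(self, image, mask, kernel_size=3):
+        blurred = cv2.medianBlur(image, kernel_size)  # vectorized neighborhood statistic
+        result = image.copy()
+        result[mask.astype(bool)] = blurred[mask.astype(bool)]
+        return result
+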
+ def replace_black_with_surrounding_mode(self,img):
+
+ # Convert the image to grayscale
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+ # Threshold the grayscale image to identify black pixels
+ _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
+
+        # NOTE: the inversion is disabled, so the mask currently marks non-black
+        # pixels; swap in the commented line to target black pixels instead
+        #mask = np.where(thresh == 0, 255, 0)
+        mask = thresh
+ # Apply custom mode filter to find the mode RGB value of the surrounding pixels
+ replaced_img = self.custom_mode_filter(img, mask, kernel_size=3)
+
+ return replaced_img
+
+ def transformGazeboImage(self,gazebo_rgb,gazebo_seg,pointcloud_msg):
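+        # Lift the Gazebo render to a point cloud, project it into the robosuite
+        # camera, splat per-pixel mean color/depth, close small holes with
+        # dilate/erode, then TELEA-inpaint the remaining gaps inside the mask.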
+ pointcloud = point_cloud2.read_points(pointcloud_msg)
+ # Pointcloud is in camera frame
+ pointcloud = np.array([[item.tolist() for item in row] for row in pointcloud])
+
+ gazebo_camera_transform = np.vstack((self.camera_intrinsic_matrix_,np.array([0,0,0,1]).reshape(1,4)))
+ _,points_to_pixels_gazebo,pixels_to_points_gazebo = self.project_points_from_world_to_camera(pointcloud,gazebo_camera_transform,self.image_shape_[0],self.image_shape_[1],True)
+ robosuite_camera_transform = np.hstack((self.robosuite_intrinsic_matrix_,np.array([0,0,0]).reshape(3,1)))
+ robosuite_camera_transform = np.vstack((robosuite_camera_transform,np.array([0,0,0,1]).reshape(1,4)))
+ robosuite_img_points,points_to_pixels_robosuite,pixels_to_points_robosuite = self.project_points_from_world_to_camera(pointcloud,robosuite_camera_transform,self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],True)
+ transformed_gazebo_rgb_noisy = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1],3))
+ transformed_gazebo_seg = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
+        # Unfilled pixels stay 0 here; downstream they are pushed to a far depth (1000)
+        gazebo_depth = np.zeros((self.robosuite_image_shape_[0],self.robosuite_image_shape_[1]))
+ for pixel in pixels_to_points_robosuite.keys():
+ points = pixels_to_points_robosuite[pixel]
+ points_np = np.array([list(point) for point in points])
+ depth_value = np.mean(points_np[:,2])
+ gazebo_pixels = []
+ for point in points:
+ gazebo_pixels.append(list(points_to_pixels_gazebo[point]))
+ rgb_values = []
+ for gazebo_pixel in gazebo_pixels:
+ rgb_values.append(gazebo_rgb[gazebo_pixel[0],gazebo_pixel[1],:])
+
+ rgb_values_np = np.array(rgb_values)
+ mean_rgb_value = np.mean(rgb_values_np,axis=0).astype(int)
+ transformed_gazebo_rgb_noisy[pixel[0],pixel[1],:] = mean_rgb_value
+ gazebo_depth[pixel[0],pixel[1]] = depth_value
+ transformed_gazebo_seg[pixel[0],pixel[1]] = 255
+        morph_kernel = np.ones((3,3),np.uint8) # proper 3x3 structuring element, as used elsewhere
+        gazebo_depth_after = cv2.dilate(gazebo_depth,morph_kernel,iterations=3)
+        gazebo_depth_after = cv2.erode(gazebo_depth_after,morph_kernel,iterations=3)
+        transformed_gazebo_seg_after = cv2.dilate(transformed_gazebo_seg,morph_kernel,iterations=3)
+        transformed_gazebo_seg_after = cv2.erode(transformed_gazebo_seg_after,morph_kernel,iterations=3)
+ transformed_gazebo_rgb_noisy = transformed_gazebo_rgb_noisy.astype(np.uint8)
+        transformed_gazebo_seg_inverted = np.where(transformed_gazebo_seg == 0,255,0).astype(np.uint8)
+ #transformed_gazebo_seg_after_inverted = np.where(transformed_gazebo_seg_after == 0,255,0)
+ #transformed_gazebo_seg_gray = np.where(transformed_gazebo_seg_after == 0,128,255).astype(np.uint8)
+ inpainting_mask = cv2.bitwise_and(transformed_gazebo_seg_inverted,transformed_gazebo_seg_inverted,mask=transformed_gazebo_seg_after.astype(np.uint8))
+ transformed_gazebo_rgb = cv2.inpaint(transformed_gazebo_rgb_noisy,inpainting_mask.astype(np.uint8),inpaintRadius=3,flags=cv2.INPAINT_TELEA)
+ return transformed_gazebo_rgb,gazebo_depth_after
+
+ def fullPointcloudCallback(self,msg,rgb,depth,segmentation):
+ if(self.is_ready_):
+ if(self.camera_intrinsic_matrix_ is None):
+ return
+ all_pixels,depth_data = self.getPixels(msg)
+ mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
+ clean_mask_image = np.zeros(self.image_shape_[:2], dtype=np.uint8)
+ depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
+ clean_depth_image = np.zeros(self.image_shape_[:2], dtype=np.float64)
+ white_color = (255,255,255)
+ i = 0
+ block_background_mask = np.all(self.original_image_ != [155,155,155],axis=2)
+ for coord in all_pixels:
+ x,y = coord
+ mask_image[round(x), round(y)] = 255
+ depth_image[round(x), round(y)] = depth_data[i]
+ if(block_background_mask[round(x),round(y)]):
+ clean_mask_image[round(x),round(y)] = 255
+ clean_depth_image[round(x),round(y)] = depth_data[i]
+ i += 1
+ cv2.imwrite('clean_mask_image.png',clean_mask_image)
+ cv2.imwrite('mask_image.png',mask_image)
+ cv2.imwrite('depth_image.png',self.normalize_depth_image(depth_image))
+ mask_image = clean_mask_image
+ depth_image = clean_depth_image
+ if(self.original_image_ is not None):
+                # Resize the Gazebo mask to the camera image size before masking
+                mask_image = cv2.resize(mask_image, (self.original_image_.shape[1], self.original_image_.shape[0]))
+ gazebo_masked_image = np.zeros_like(self.original_image_)
+ gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=clean_mask_image)
+ cv2.imwrite('original_image.png',self.original_image_)
+ cv2.imwrite('gazebo_masked_image.png',gazebo_masked_image)
+ self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,depth_image)
+ return
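+            # The code below only runs when no Gazebo image has arrived yet; it
+            # predates the single-channel masks used above (e.g. it still converts
+            # the mask with COLOR_RGB2GRAY) and appears to be a legacy debug path.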
+ np.save('/home/lawrence/gazebo_robot_depth.npy',depth_image)
+ old_mask_image = mask_image
+ mask_image = cv2.cvtColor(mask_image, cv2.COLOR_RGB2GRAY)
+ _, mask_image = cv2.threshold(mask_image, 128, 255, cv2.THRESH_BINARY)
+ if(self.original_image_ is not None):
+            mask_image = cv2.resize(mask_image, (self.original_image_.shape[1], self.original_image_.shape[0]))
+ im_floodfill = mask_image.copy()
+ (h,w) = mask_image.shape
+ mask = np.zeros((h+2,w+2),np.uint8)
+ cv2.floodFill(im_floodfill,mask,(0,0),255)
+ im_floodfill_inv = cv2.bitwise_not(im_floodfill)
+ im_out = mask_image | im_floodfill_inv
+ mask_image = im_out
+ # Create a new image with the same size as the original image
+ gazebo_masked_image = np.zeros_like(self.original_image_)
+
+ # Apply the gazebo_mask to the original image using element-wise multiplication
+ gazebo_masked_image = cv2.bitwise_and(self.original_image_, self.original_image_, mask=mask_image)
+ gazebo_masked_image[:, :, 0], gazebo_masked_image[:, :, 2] = gazebo_masked_image[:, :, 2].copy(), gazebo_masked_image[:, :, 0].copy()
+ cv2.imwrite('/home/lawrence/gazebo_robot_only.jpg',gazebo_masked_image)
+ cv2.imwrite('/home/lawrence/gazebo_mask.jpg',mask_image)
+ #mask_image = cv2.convertScaleAbs(mask_image, alpha=(255.0/65535.0))
+ ros_mask_image = self.cv_bridge_.cv2_to_imgmsg(old_mask_image,encoding="bgr8")
+ self.full_mask_image_publisher_.publish(ros_mask_image)
+ self.inpainting(rgb,depth,segmentation,gazebo_masked_image,mask_image,msg)
+
+ def transformStampedToMatrix(self,rotation,translation):
+ if(self.is_ready_):
+ q0 = rotation.w
+ q1 = rotation.x
+ q2 = rotation.y
+ q3 = rotation.z
+ # First row of the rotation matrix
+ r00 = 2 * (q0 * q0 + q1 * q1) - 1
+ r01 = 2 * (q1 * q2 - q0 * q3)
+ r02 = 2 * (q1 * q3 + q0 * q2)
+
+ # Second row of the rotation matrix
+ r10 = 2 * (q1 * q2 + q0 * q3)
+ r11 = 2 * (q0 * q0 + q2 * q2) - 1
+ r12 = 2 * (q2 * q3 - q0 * q1)
+
+ # Third row of the rotation matrix
+ r20 = 2 * (q1 * q3 - q0 * q2)
+ r21 = 2 * (q2 * q3 + q0 * q1)
+ r22 = 2 * (q0 * q0 + q3 * q3) - 1
+ t_matrix = np.array(
+ [[r00, r01, r02,translation.x * 1000],
+ [r10, r11, r12,translation.y * 1000],
+ [r20, r21, r22,translation.z * 1000],
+ [0,0,0,1]]
+ )
+ return t_matrix
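+    # The 3x3 block above is the standard unit-quaternion (w,x,y,z) to rotation
+    # matrix formula; the translation is scaled to millimeters to match the
+    # camera/mesh units used elsewhere in this node.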
+
+ def eulerToR(self,rpy_np):
+ if(self.is_ready_):
+ rotation_x = rpy_np[0]
+ rotation_y = rpy_np[1]
+ rotation_z = rpy_np[2]
+ Rx = np.array([[1,0,0],[0,np.cos(rotation_x),-np.sin(rotation_x)],[0,np.sin(rotation_x),np.cos(rotation_x)]])
+ Ry = np.array([[np.cos(rotation_y),0,np.sin(rotation_y)],[0,1,0],[-np.sin(rotation_y),0,np.cos(rotation_y)]])
+ Rz = np.array([[np.cos(rotation_z),-np.sin(rotation_z),0],[np.sin(rotation_z),np.cos(rotation_z),0],[0,0,1]])
+ R = np.matmul(Rz,Ry)
+ R = np.matmul(R,Rx)
+ return R
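+    # R = Rz @ Ry @ Rx, i.e. extrinsic x-y-z Euler angles; this matches
+    # scipy.spatial.transform.Rotation.from_euler('xyz', rpy_np).as_matrix().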
+
+    def replace_nan_with_neighbors_average(self,array):
+        def replace_function(subarray):
+            center = subarray[4]
+            # NaN never compares equal to anything, so test with np.isnan
+            if(not np.isnan(center)):
+                return center
+            valid_values = subarray[~np.isnan(subarray)]
+            if valid_values.size == 0:
+                return np.nan
+            else:
+                return valid_values.mean()
+
+        return generic_filter(array, replace_function, size=(3, 3), mode='constant', cval=np.nan)
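+    # generic_filter calls a Python function per pixel, so this is slow on
+    # full-resolution depth maps; best reserved for small images or debugging.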
+
+ def listenerCallback(self,msg):
+ if self.is_ready_:
+ self.offline_ = True
+ start_time = time.time()
+ # path_array = msg.rgb.split('/')
+ # online_input_folder = '/'.join(path_array[:-3]) + '/offline_ur5e_input'
+ # if not os.path.exists(online_input_folder):
+ # os.makedirs(online_input_folder)
+ # online_input_num_folder = online_input_folder + '/' + path_array[-2]
+ # if not os.path.exists(online_input_num_folder):
+ # os.makedirs(online_input_num_folder)
+ # input_rgb_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ rgb_image = cv2.imread(msg.rgb)
+ # cv2.imwrite(input_rgb_path,rgb_image)
+ rgb_array = rgb_image.flatten().tolist()
+ depth_image = np.load(msg.depth)
+ # path_array = msg.depth.split('/')
+ # input_depth_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # np.save(input_depth_path,depth_image)
+ depth_array = depth_image.flatten().tolist()
+ self.seg_file_ = msg.segmentation
+ segmentation_image = cv2.imread(msg.segmentation)
+ # path_array = msg.segmentation.split('/')
+ # input_segmentation_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # cv2.imwrite(input_segmentation_path,segmentation_image)
+ segmentation_ros_image = self.cv_bridge_.cv2_to_imgmsg(segmentation_image)
+ joint_array = np.load(msg.joints).tolist()
+ # path_array = msg.joints.split('/')
+ # input_joints_path = online_input_folder + '/' +'/'.join(path_array[-2:])
+ # np.save(input_joints_path,np.array(joint_array))
+ input_file_robosuite_data_msg = InputFilesRobosuiteData()
+ input_file_robosuite_data_msg.rgb = rgb_array
+ input_file_robosuite_data_msg.depth_map = depth_array
+ input_file_robosuite_data_msg.segmentation = segmentation_ros_image
+ input_file_robosuite_data_msg.joints = joint_array
+ self.listenerCallbackOnlineDebug(input_file_robosuite_data_msg)
+ return
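+            # NOTE: the return above routes everything through
+            # listenerCallbackOnlineDebug; the remainder of this callback is the
+            # older offline path and no longer executes.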
+ rgb = msg.rgb
+ depth = msg.depth
+ segmentation = msg.segmentation
+ joints = msg.joints
+ joint_array = np.load(joints)
+ gripper = joint_array[-1]
+ joint_array = joint_array[:-1]
+
+ ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ self.q_init_ = qout
+ # Hardcoded gripper
+ qout_list = qout.tolist()
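+            # Affine map from the source gripper reading onto the Panda finger
+            # command range; the constants appear to be fitted to the open/closed
+            # endpoints of the two grippers.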
+ panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
+ qout_list.append(panda_gripper_command)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.joint_commands_callback(qout_msg)
+ self.setupMeshes(rgb,depth,segmentation)
+ end_time = time.time()
+
+ def listenerCallbackOnlineDebug(self,msg):
+ if(len(msg.data_pieces) == 2):
+ start_time = time.time()
+ self.i_ += 1
+ online_input_folder = 'offline_ur5e_input'
+ if not os.path.exists(online_input_folder):
+ os.makedirs(online_input_folder)
+ online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
+ if not os.path.exists(online_input_num_folder):
+ os.makedirs(online_input_num_folder)
+ left_msg = msg.data_pieces[0]
+ right_msg = msg.data_pieces[1]
+ elif(len(msg.data_pieces) == 1):
+ left_msg = msg.data_pieces[0]
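+            # NOTE: this single-view branch only sets left_msg; the code below
+            # still reads right_msg, so in practice two data pieces are assumed.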
+        left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+        if(left_rgb_np.dtype == np.float32):
+            self.float_image_ = True
+            left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
+        # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+        left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
+        left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
+        left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
+        left_depth_np[np.isnan(left_depth_np)] = 0
+
+ right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
+ right_depth_np[np.isnan(right_depth_np)] = 0
+ if(right_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
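+            # Hedged refactor sketch (not wired in): the inf/nan clamping above is
+            # repeated per camera; a helper would keep the assumed near/far bounds
+            # (0.01 m and 10 m) in one place, e.g.:
+            # def sanitize_depth(depth, near=0.01, far=10.0):
+            #     depth[np.isinf(depth) & (depth < 0)] = near
+            #     depth[np.isinf(depth) & (depth > 0)] = far
+            #     depth[np.isnan(depth)] = 0
+            #     return depth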
+ ur5_joints = np.array(left_msg.joints)
+ real_ee_gripper_joint = ur5_joints[-1]
+ ur5_joints = ur5_joints[:-1]
+ ur5_joints[0] += math.pi
+ ee_pose = self.ur5_solver_.fk(ur5_joints)
+ end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
+ ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ b_xyz = 1e-5
+ b_rpy = 1e-3
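+            # Relax the IK tolerances exponentially (x10 per retry, starting at
+            # 1e-5 m / 1e-3 rad); at 0.01 m the search gives up and falls back to
+            # the previous joint configuration.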
+ while(qout is None):
+ b_xyz *= 10
+ b_rpy *= 10
+                qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+ if(b_xyz == 0.01):
+ print("Couldn't find good IK")
+ qout = self.q_init_
+ print("Bound xyz: " + str(b_xyz))
+ self.q_init_ = qout
+ ur5_arm_joints = ur5_joints.tolist()
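+            # Scale the source reading into each simulated gripper's range: 0.8x
+            # for the six coupled UR5 gripper joints, and an affine map onto the
+            # Panda's two 0..0.04 m finger joints (1 = closed -> 0.0, 0 = open -> 0.04).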
+ ur5_gripper_joint = 0.8 * real_ee_gripper_joint
+ ur5_gripper_joints = [ur5_gripper_joint] * 6
+ panda_arm_joints = qout.tolist()
+ panda_gripper_joint = -0.04 * real_ee_gripper_joint + 0.04
+ panda_gripper_joints = [panda_gripper_joint] * 2
+ full_joint_list = []
+
+ full_joint_list.extend(ur5_arm_joints)
+ full_joint_list.extend(ur5_gripper_joints)
+ full_joint_list.extend(panda_arm_joints)
+ full_joint_list.extend(panda_gripper_joints)
+
+ qout_msg = Float64MultiArray()
+ qout_msg.data = full_joint_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.left_real_rgb_ = left_msg.rgb
+ self.right_real_rgb_ = right_msg.rgb
+
+ self.left_real_depth_ = left_depth_np
+ self.right_real_depth_ = right_depth_np
+ self.real_qout_list_ = full_joint_list
+ end_time = time.time()
+ print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
+ # else:
+ # qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
+ # print("WARNING HIT IK ON FRANKA ERROR")
+
+
+ def listenerCallbackData(self,msg):
+ if self.is_ready_:
+ start_time = time.time()
+ segmentation_data = msg.segmentation
+
+ segmentation_data = self.cv_bridge_.imgmsg_to_cv2(segmentation_data)
+
+ rgb = msg.rgb
+ rgb = np.array(rgb) \
+ .reshape((segmentation_data.shape[0], segmentation_data.shape[1], 3))
+
+            depth_map = msg.depth_map
+            depth_map = np.array(depth_map) \
+                .reshape(segmentation_data.shape)
+
+ joint_array = msg.joints
+ gripper = joint_array[-1]
+ joint_array = joint_array[:-1]
+
+ ee_pose = self.ur5e_solver_.fk(np.array(joint_array))
+ qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ self.q_init_ = qout
+
+ # Hardcoded gripper
+ qout_list = qout.tolist()
+ panda_gripper_command = -0.05702400673569841 * gripper + 0.02670973458948458
+ qout_list.append(panda_gripper_command)
+ qout_msg = Float64MultiArray()
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.joint_commands_callback(qout_msg)
+ self.setupMeshes(rgb, depth_map, segmentation_data)
+ # end_time = time.time()
+ # print("Total time: " + str(end_time - start_time) + " s")
+
+
+
+def main(args=None):
+ rclpy.init(args=args)
+
+ write_data = WriteData()
+
+ rclpy.spin(write_data)
+
+ # Destroy the node explicitly
+ # (optional - otherwise it will be done automatically
+ # when the garbage collector destroys the node object)
+ write_data.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better.py
index 0d59466..d7b255e 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better.py
@@ -3,7 +3,6 @@
import rclpy
from rclpy.node import Node
import trimesh
-import open3d as o3d
from std_msgs.msg import Header, Float64MultiArray,Bool
from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
import numpy as np
@@ -28,7 +27,6 @@
from tracikpy import TracIKSolver
from mdh.kinematic_chain import KinematicChain
from mdh import UnReachable
-import kinpy as kp
from geometry_msgs.msg import Vector3, Quaternion
from scipy.spatial.transform import Rotation as R
from message_filters import ApproximateTimeSynchronizer, Subscriber
@@ -41,11 +39,11 @@ def __init__(self):
self.is_ready_ = False
self.thetas_ = None
self.debug_ = True
+ self.float_image_ = False
file_directory = pathlib.Path(__file__).parent.resolve()
self.panda_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
self.panda_solver_ = TracIKSolver(self.panda_urdf_,"panda_with_ur5_gripper_link0","panda_with_ur5_gripper_link8")
self.ur5_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/ur5_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
self.ur5_solver_ = TracIKSolver(self.ur5_urdf_,"base_link","wrist_3_link")
# real_camera_link to world and then multiply translation by 1000
@@ -80,7 +78,7 @@ def __init__(self):
1)
self.subscription_data_ = self.create_subscription(
InputFilesRealDataMulti,
- 'input_files_data_real_multi',
+ 'input_files_data_real',
self.listenerCallbackOnlineDebug,
1)
self.joint_state_msg = JointState()
@@ -213,37 +211,10 @@ def __init__(self):
timer_period = 0.5
self.links_info_ = []
self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
#self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
self.updated_joints_ = False
self.is_ready_ = True
@@ -271,7 +242,7 @@ def pandaRightRgbCallback(self,msg):
def pandaRightDepthCallback(self,msg):
self.panda_right_depth_ = msg
- def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,world_rgb,world_depth):
real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
@@ -282,58 +253,86 @@ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
real_seg_255_np = 255 * real_seg_np
gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
gazebo_seg_255_np = 255 * gazebo_seg_np
- left_real_rgb = self.left_real_rgb_
- right_real_rgb = self.right_real_rgb_
- left_real_depth = self.left_real_depth_
- right_real_depth = self.right_real_depth_
+ cv2.imwrite('real_seg.png',real_seg_255_np)
+ cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
+ real_rgb = world_rgb
+ real_depth = world_depth
real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- return self.dummyInpainting(left_real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
+ return self.dummyInpaintingEarly(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
return
+ def dummyInpaintingEarly(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
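+        # Composite the rendered Panda over the real frame: threshold both masks,
+        # cut the real robot out, TELEA-inpaint the hole, paste the Gazebo robot
+        # on top, and also return a diffusion_input with the leftover seam blacked
+        # out (presumably for a downstream generative fill).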
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=35)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ # inverted_segmentation_mask_255 = np.roll(inverted_segmentation_mask_255, shift=(3, 5), axis=(0, 1))
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre3.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre4.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
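+        # With 0/255 masks this product acts as a logical AND: 255*255 wraps to 1
+        # in uint8 and the trailing *255 restores 255, marking the dilated
+        # real-robot region that the rendered robot does not cover.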
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
+
def noTimeGazeboCallback(self,joint_msg):
start_time = time.time()
- left_inpainted_image = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_)
- right_inpainted_image = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_)
+ left_inpainted_image,left_mask = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_,self.left_real_rgb_,self.left_real_depth_)
+ #right_inpainted_image, right_mask = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_,self.right_real_rgb_,self.right_real_depth_)
+ right_inpainted_image, right_mask = left_inpainted_image,left_mask
inpainted_image_msg = MultipleInpaintImages()
- inpainted_image_msg.images = [left_inpainted_image,right_inpainted_image]
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(left_mask),self.cv_bridge_.cv2_to_imgmsg(right_mask)]
+ mask_image_msg.images = []
self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
inpaint_number = str(self.i_).zfill(5)
if not os.path.exists('left_inpainting'):
os.makedirs('left_inpainting')
if not os.path.exists('right_inpainting'):
os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
- # real_rgb = self.ur5_rgb_
- # real_depth = self.ur5_depth_
- # gazebo_rgb = self.panda_rgb_
- # gazebo_depth = self.panda_depth_
- # real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- # real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- # gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
-
- # gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- # cv2.imwrite('real_rgb.png',real_rgb_np)
- # cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- # cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- # cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
- # real_seg_np = (real_depth_np < 8).astype(np.uint8)
- # real_seg_255_np = 255 * real_seg_np
- # gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- # gazebo_seg_255_np = 255 * gazebo_seg_np
- # cv2.imwrite('real_seg.png',real_seg_255_np)
- # cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- # left_real_rgb = self.left_real_rgb_
- # right_real_rgb = self.right_real_rgb_
- # left_real_depth = self.left_real_depth_
- # right_real_depth = self.right_real_depth_
- # real_seg = real_seg_255_np
- # gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- # self.dummyInpainting(left_real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- # #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- # self.updated_joints_ = False
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
end_time = time.time()
print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
return
@@ -1105,57 +1104,66 @@ def listenerCallbackOnlineDebug(self,msg):
os.makedirs(online_input_num_folder)
left_msg = msg.data_pieces[0]
right_msg = msg.data_pieces[1]
-
- left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
- left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
- left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
- left_depth_np[np.isnan(left_depth_np)] = 0
-
- right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
- right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
- right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
- right_depth_np[np.isnan(right_depth_np)] = 0
-
- ur5_joints = np.array(left_msg.joints)
- ur5_joints[0] += math.pi
- ee_pose = self.ur5_solver_.fk(ur5_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz == 0.01):
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- #qout[0] -= math.pi
- self.q_init_ = qout
- # Hardcoded gripper
- qout_list = qout.tolist()
- ur5_joints = ur5_joints.tolist()
- ur5_joints.extend(qout_list)
- qout_list = ur5_joints
- qout_msg = Float64MultiArray()
- qout_msg.data = qout_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.left_real_rgb_ = left_msg.rgb
- self.right_real_rgb_ = right_msg.rgb
-
- self.left_real_depth_ = left_depth_np
- self.right_real_depth_ = right_depth_np
- self.real_qout_list_ = qout_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
+ elif(len(msg.data_pieces) == 1):
+ left_msg = msg.data_pieces[0]
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ if(left_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
+ left_depth_np[np.isnan(left_depth_np)] = 0
+
+ right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
+ if(right_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
+ right_depth_np[np.isnan(right_depth_np)] = 0
+
+ ur5_joints = np.array(left_msg.joints)
+ ur5_joints = ur5_joints[:-1]
+ ur5_joints[0] += math.pi
+ ee_pose = self.ur5_solver_.fk(ur5_joints)
+ end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
+ ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ b_xyz = 1e-5
+ b_rpy = 1e-3
+ while(qout is None):
+ b_xyz *= 10
+ b_rpy *= 10
+ qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+ if(b_xyz == 0.01):
+ print("Couldn't find good IK")
+ qout = self.q_init_
+ print("Bound xyz: " + str(b_xyz))
+ #qout[0] -= math.pi
+ self.q_init_ = qout
+ # Hardcoded gripper
+ qout_list = qout.tolist()
+ ur5_joints = ur5_joints.tolist()
+ ur5_joints.extend(qout_list)
+ qout_list = ur5_joints
+ qout_msg = Float64MultiArray()
+ qout_list[2] = 1.58889
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.left_real_rgb_ = left_msg.rgb
+ self.right_real_rgb_ = right_msg.rgb
+
+ self.left_real_depth_ = left_depth_np
+ self.right_real_depth_ = right_depth_np
+ self.real_qout_list_ = qout_list
+ end_time = time.time()
+ print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
# else:
# qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
# print("WARNING HIT IK ON FRANKA ERROR")
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_multi_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better_early_inpaint.py
similarity index 85%
rename from mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_multi_real_better.py
rename to mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better_early_inpaint.py
index 0e07e60..43bd280 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_gripper_multi_real_better.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better_early_inpaint.py
@@ -3,7 +3,6 @@
import rclpy
from rclpy.node import Node
import trimesh
-import open3d as o3d
from std_msgs.msg import Header, Float64MultiArray,Bool
from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
import numpy as np
@@ -28,7 +27,6 @@
from tracikpy import TracIKSolver
from mdh.kinematic_chain import KinematicChain
from mdh import UnReachable
-import kinpy as kp
from geometry_msgs.msg import Vector3, Quaternion
from scipy.spatial.transform import Rotation as R
from message_filters import ApproximateTimeSynchronizer, Subscriber
@@ -41,13 +39,11 @@ def __init__(self):
self.is_ready_ = False
self.thetas_ = None
self.debug_ = True
- self.float_image_ = False
file_directory = pathlib.Path(__file__).parent.resolve()
- self.panda_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_gripper_ik_real.urdf')
- self.panda_solver_ = TracIKSolver(self.panda_urdf_,"panda_link0","panda_ee")
+ self.panda_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
+ self.panda_solver_ = TracIKSolver(self.panda_urdf_,"panda_with_ur5_gripper_link0","panda_with_ur5_gripper_link8")
self.ur5_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/ur5_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
- self.ur5_solver_ = TracIKSolver(self.ur5_urdf_,"base_link","ur5_ee_gripper")
+ self.ur5_solver_ = TracIKSolver(self.ur5_urdf_,"base_link","wrist_3_link")
# real_camera_link to world and then multiply translation by 1000
# self.camera_to_world_ = np.array([[0,1,0,0],
@@ -81,31 +77,14 @@ def __init__(self):
1)
self.subscription_data_ = self.create_subscription(
InputFilesRealDataMulti,
- 'input_files_data_real_multi',
+ 'input_files_data_real',
self.listenerCallbackOnlineDebug,
1)
self.joint_state_msg = JointState()
#Harcoding start position
self.q_init_ = np.array([-0.56332457, 0.06948572, 0.5227356, -2.26363611, -0.11123186, 2.28321218, -0.09410787])
-
- self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro")
- xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_
- xacro_subprocess = subprocess.Popen(
- xacro_command,
- shell=True,
- stdout=subprocess.PIPE,
- )
- urdf_string = ""
- while True:
- line = xacro_subprocess.stdout.readline()
- if line:
- line_byte = line.strip()
- line = line_byte.decode("utf-8")
- urdf_string += line
- else:
- break
- root = ET.fromstring(urdf_string)
+
self.publishers_ = []
self.subscribers_ = []
self.timers_ = []
@@ -168,7 +147,7 @@ def __init__(self):
self.pandaLeftDepthCallback,
1
)
-
+ self.float_image_ = False
self.ur5_right_rgb_ = None
self.ur5_right_depth_ = None
self.panda_right_rgb_ = None
@@ -201,6 +180,7 @@ def __init__(self):
1
)
+
self.joint_subscription_ = self.create_subscription(
JointState,
'/gazebo_joint_states',
@@ -213,37 +193,10 @@ def __init__(self):
timer_period = 0.5
self.links_info_ = []
self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
#self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
self.updated_joints_ = False
self.is_ready_ = True
@@ -271,22 +224,7 @@ def pandaRightRgbCallback(self,msg):
def pandaRightDepthCallback(self,msg):
self.panda_right_depth_ = msg
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- left_inpainted_image = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_)
- right_inpainted_image = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_)
- inpainted_image_msg = MultipleInpaintImages()
- inpainted_image_msg.images = [left_inpainted_image,right_inpainted_image]
- self.inpainted_publisher_.publish(inpainted_image_msg)
- inpaint_number = str(self.i_).zfill(5)
- if not os.path.exists('left_inpainting'):
- os.makedirs('left_inpainting')
- if not os.path.exists('right_inpainting'):
- os.makedirs('right_inpainting')
- cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
- cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
-
- def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,world_rgb,world_depth):
real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
@@ -303,41 +241,62 @@ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth):
gazebo_seg_255_np = 255 * gazebo_seg_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
+ real_rgb = world_rgb
+ real_depth = world_depth
real_seg = real_seg_255_np
gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- return self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
-
+ return self.dummyInpaintingEarly(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
+
def noTimeGazeboCallback(self,joint_msg):
start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
- real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
- real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
- gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
+ left_inpainted_image,left_mask = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_,self.left_real_rgb_,self.left_real_depth_)
+ right_inpainted_image,right_mask = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_,self.right_real_rgb_,self.right_real_depth_)
+ inpainted_image_msg = MultipleInpaintImages()
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(left_inpainted_image),self.cv_bridge_.cv2_to_imgmsg(right_inpainted_image)]
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('left_inpainting'):
+ os.makedirs('left_inpainting')
+ if not os.path.exists('right_inpainting'):
+ os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
+ cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
+ # real_rgb = self.ur5_rgb_
+ # real_depth = self.ur5_depth_
+ # gazebo_rgb = self.panda_rgb_
+ # gazebo_depth = self.panda_depth_
+ # real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
+ # real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
+ # gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
- gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
+ # gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
+ # cv2.imwrite('real_rgb.png',real_rgb_np)
+ # cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
+ # cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
+ # cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
- real_seg_np = (real_depth_np < 8).astype(np.uint8)
- real_seg_255_np = 255 * real_seg_np
- gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
- gazebo_seg_255_np = 255 * gazebo_seg_np
- cv2.imwrite('real_seg.png',real_seg_255_np)
- cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
- real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
+ # real_seg_np = (real_depth_np < 8).astype(np.uint8)
+ # real_seg_255_np = 255 * real_seg_np
+ # gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
+ # gazebo_seg_255_np = 255 * gazebo_seg_np
+ # cv2.imwrite('real_seg.png',real_seg_255_np)
+ # cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
+ # left_real_rgb = self.left_real_rgb_
+ # right_real_rgb = self.right_real_rgb_
+ # left_real_depth = self.left_real_depth_
+ # right_real_depth = self.right_real_depth_
+ # real_seg = real_seg_255_np
+ # gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
+ # self.dummyInpainting(left_real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
+ # #self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
+ # self.updated_joints_ = False
end_time = time.time()
print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
return
@@ -361,7 +320,7 @@ def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
gazebo_seg_255_np = 255 * gazebo_seg_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
+ real_rgb = self.left_real_rgb_
real_depth = self.real_depth_
real_seg = real_seg_255_np
gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
@@ -561,6 +520,56 @@ def convertPointcloudToDepth(self,pointcloud):
depth_image[pixel[1],pixel[0]] = point[2]
return depth_image
+ def dummyInpaintingEarly(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ seg_file = cv2.erode(seg_file,np.ones((3,3),np.uint8),iterations=3)
+
+ cv2.imwrite('seg_file.png',seg_file)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=30)
+ cv2.imwrite('inverted_mask2.png',inverted_segmentation_mask_255)
+ eroded_segmentation_mask_255 = cv2.bitwise_not(inverted_segmentation_mask_255)
+ cv2.imwrite('eroded_mask1.png',eroded_segmentation_mask_255)
+ eroded_segmentation_mask_255 = eroded_segmentation_mask_255 + seg_file
+ cv2.imwrite('eroded_mask2.png',eroded_segmentation_mask_255)
+ inverted_segmentation_mask_255 = cv2.bitwise_not(eroded_segmentation_mask_255)
+ cv2.imwrite('final_mask.png',inverted_segmentation_mask_255)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+        cv2.imwrite('background_only_pre3.png',inverted_segmentation_mask_255)
+        background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+        cv2.imwrite('background_only_pre4.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ return inpainted_image,better_dilated_blend_mask
+
def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
# rgb = cv2.resize(rgb,(128,128))
@@ -598,6 +607,7 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
target_mask = np.zeros_like(inpainted_image)
target_mask[:,:] = target_color
inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ return self.cv_bridge_.cv2_to_imgmsg(inpainted_image,encoding="bgr8")
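+        # NOTE: this early return bypasses the file writing and publishing below,
+        # which remains in place but no longer executes.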
# inpainted_image = cv2_inpaint_image
inpaint_number = str(self.i_).zfill(5)
if not os.path.exists('inpainting'):
@@ -606,9 +616,6 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
os.makedirs('mask')
cv2.imwrite('inpainting/inpaint'+ str(inpaint_number) +'.png',inpainted_image)
cv2.imwrite('mask/mask'+ str(inpaint_number) +'.png',better_dilated_blend_mask.astype(np.uint8))
- if(self.float_image_):
- import pdb
- pdb.set_trace()
inpainted_image_msg = self.cv_bridge_.cv2_to_imgmsg(inpainted_image,encoding="bgr8")
mask_image_msg = self.cv_bridge_.cv2_to_imgmsg(better_dilated_blend_mask.astype(np.uint8),encoding="mono8")
self.inpainted_publisher_.publish(inpainted_image_msg)
@@ -1111,78 +1118,58 @@ def listenerCallbackOnlineDebug(self,msg):
os.makedirs(online_input_num_folder)
left_msg = msg.data_pieces[0]
right_msg = msg.data_pieces[1]
- start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
- if(left_rgb_np.dtype == np.float32):
- self.float_image_ = True
- left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
- left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
- left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
- left_depth_np[np.isnan(left_depth_np)] = 0
-
- right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
- if(right_rgb_np.dtype == np.float32):
- self.float_image_ = True
- right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
- # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
- right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
- right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
- right_depth_np[np.isnan(right_depth_np)] = 0
-
- ur5_joints = np.array(left_msg.joints)
- ur5_joints[0] += math.pi
- ee_pose = self.ur5_solver_.fk(ur5_joints)
- end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
- ee_pose = ee_pose @ end_effector_rotation_with_no_translation
- scipy_rotation = R.from_matrix(ee_pose[:3,:3])
- scipy_quaternion = scipy_rotation.as_quat()
- qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
- b_xyz = 1e-5
- b_rpy = 1e-3
- while(qout is None):
- b_xyz *= 10
- b_rpy *= 10
- qout = self.ur5_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
- if(b_xyz == 0.01):
- print("Couldn't find good IK")
- qout = self.q_init_
- print("Bound xyz: " + str(b_xyz))
- self.q_init_ = qout
- ur5_arm_joints = ur5_joints.tolist()
- ur5_gripper_joint = 0.0
- ur5_gripper_joints = [ur5_gripper_joint] * 6
- panda_arm_joints = qout.tolist()
- panda_gripper_joint = 0.04
- panda_gripper_joints = [panda_gripper_joint] * 2
- full_joint_list = []
-
- full_joint_list.extend(ur5_arm_joints)
- full_joint_list.extend(ur5_gripper_joints)
- full_joint_list.extend(panda_arm_joints)
- full_joint_list.extend(panda_gripper_joints)
-
- qout_msg = Float64MultiArray()
- qout_msg.data = full_joint_list
- self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.left_real_rgb_ = left_msg.rgb
- self.right_real_rgb_ = right_msg.rgb
-
- self.left_real_depth_ = left_depth_np
- self.right_real_depth_ = right_depth_np
- self.real_qout_list_ = full_joint_list
- end_time = time.time()
- print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
+
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
+ left_depth_np[np.isnan(left_depth_np)] = 0
+
+ right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
+ # rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
+ right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
+ right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
+ right_depth_np[np.isnan(right_depth_np)] = 0
+
+ ur5_joints = np.array(left_msg.joints)
+ ur5_joints = ur5_joints[:-1]
+ ur5_joints[0] += math.pi
+ ee_pose = self.ur5_solver_.fk(ur5_joints)
+ end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
+ ee_pose = ee_pose @ end_effector_rotation_with_no_translation
+ scipy_rotation = R.from_matrix(ee_pose[:3,:3])
+ scipy_quaternion = scipy_rotation.as_quat()
+ qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_)
+ b_xyz = 1e-5
+ b_rpy = 1e-3
+ while(qout is None):
+ b_xyz *= 10
+ b_rpy *= 10
+ qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=b_xyz,by=b_xyz,bz=b_xyz,brx=b_rpy,bry=b_rpy,brz=b_rpy)
+ if(b_xyz == 0.01):
+ print("Couldn't find good IK")
+ qout = self.q_init_
+ print("Bound xyz: " + str(b_xyz))
+ #qout[0] -= math.pi
+ self.q_init_ = qout
+ # Hardcoded gripper
+ qout_list = qout.tolist()
+ ur5_joints = ur5_joints.tolist()
+ ur5_joints.extend(qout_list)
+ qout_list = ur5_joints
+ qout_msg = Float64MultiArray()
+ qout_msg.data = qout_list
+ self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
+ self.left_real_rgb_ = left_msg.rgb
+ self.right_real_rgb_ = right_msg.rgb
+
+ self.left_real_depth_ = left_depth_np
+ self.right_real_depth_ = right_depth_np
+ self.real_qout_list_ = qout_list
+ end_time = time.time()
+ print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
# else:
# qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_,bx=1e-3,by=1e-3,bz=1e-3)
# print("WARNING HIT IK ON FRANKA ERROR")
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_real_better.py b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better_reproject.py
similarity index 75%
rename from mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_real_better.py
rename to mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better_reproject.py
index 66a1940..e9af62f 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_real_better.py
+++ b/mirage/mirage/ros_ws/src/gazebo_env/scripts/write_data_node_ur5_to_panda_no_gripper_multi_real_better_reproject.py
@@ -3,7 +3,6 @@
import rclpy
from rclpy.node import Node
import trimesh
-import open3d as o3d
from std_msgs.msg import Header, Float64MultiArray,Bool
from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image
import numpy as np
@@ -23,12 +22,11 @@
import cv2
from cv_bridge import CvBridge
import time
-from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData
+from input_filenames_msg.msg import InputFilesRobosuite, InputFilesRobosuiteData, InputFilesRealData, InputFilesRealDataMulti, MultipleInpaintImages
from sensor_msgs.msg import JointState
from tracikpy import TracIKSolver
from mdh.kinematic_chain import KinematicChain
from mdh import UnReachable
-import kinpy as kp
from geometry_msgs.msg import Vector3, Quaternion
from scipy.spatial.transform import Rotation as R
from message_filters import ApproximateTimeSynchronizer, Subscriber
@@ -41,11 +39,11 @@ def __init__(self):
self.is_ready_ = False
self.thetas_ = None
self.debug_ = True
+ self.float_image_ = False
file_directory = pathlib.Path(__file__).parent.resolve()
self.panda_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/panda_ur5_gripper_ik_real.urdf')
self.panda_solver_ = TracIKSolver(self.panda_urdf_,"panda_with_ur5_gripper_link0","panda_with_ur5_gripper_link8")
self.ur5_urdf_ = os.path.join(file_directory,'../../../../src/gazebo_env/description/urdf/ur5_ik_real.urdf')
- self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read())
self.ur5_solver_ = TracIKSolver(self.ur5_urdf_,"base_link","wrist_3_link")
# real_camera_link to world and then multiply translation by 1000
@@ -61,6 +59,9 @@ def __init__(self):
self.robosuite_intrinsic_matrix_ = np.array([[101.39696962, 0. , 42. ],
[ 0. , 101.39696962, 42. ],
[ 0. , 0. , 1. ]])
+ self.intrinsic_matrix_ = np.array([[524.22595215, 0. , 639.77819824],
+ [ 0. , 524.22595215, 370.27804565],
+ [0,0,1]])
self.ur5_and_panda_joint_command_publisher_ = self.create_publisher(Float64MultiArray,'/joint_commands',1)
self.panda_joint_state_publisher_ = self.create_publisher(
JointState,
@@ -79,7 +80,7 @@ def __init__(self):
self.listenerCallback,
1)
self.subscription_data_ = self.create_subscription(
- InputFilesRealData,
+ InputFilesRealDataMulti,
'input_files_data_real',
self.listenerCallbackOnlineDebug,
1)
@@ -108,8 +109,10 @@ def __init__(self):
self.publishers_ = []
self.subscribers_ = []
self.timers_ = []
- self.real_rgb_ = None
- self.real_depth_ = None
+ self.left_real_rgb_ = None
+ self.right_real_rgb_ = None
+ self.left_real_depth_ = None
+ self.right_real_depth_ = None
self.real_seg_ = None
self.real_qout_list_ = None
self.i_ = -1
@@ -134,38 +137,71 @@ def __init__(self):
1
)
- self.ur5_rgb_ = None
- self.ur5_depth_ = None
- self.panda_rgb_ = None
- self.panda_depth_ = None
- self.ur5_camera_color_subscription_ = self.create_subscription(
+ self.ur5_left_rgb_ = None
+ self.ur5_left_depth_ = None
+ self.panda_left_rgb_ = None
+ self.panda_left_depth_ = None
+ self.ur5_left_camera_color_subscription_ = self.create_subscription(
Image,
- '/ur5_camera/image_raw',
- self.ur5RgbCallback,
+ '/ur5_left_camera/image_raw',
+ self.ur5LeftRgbCallback,
1
)
- self.ur5_camera_depth_subscription_ = self.create_subscription(
+ self.ur5_left_camera_depth_subscription_ = self.create_subscription(
Image,
- '/ur5_camera/depth/image_raw',
- self.ur5DepthCallback,
+ '/ur5_left_camera/depth/image_raw',
+ self.ur5LeftDepthCallback,
1
)
- self.panda_camera_color_subscription_ = self.create_subscription(
+ self.panda_left_camera_color_subscription_ = self.create_subscription(
Image,
- '/panda_camera/image_raw',
- self.pandaRgbCallback,
+ '/panda_left_camera/image_raw',
+ self.pandaLeftRgbCallback,
1
)
- self.panda_camera_depth_subscription_ = self.create_subscription(
+ self.panda_left_camera_depth_subscription_ = self.create_subscription(
Image,
- '/panda_camera/depth/image_raw',
- self.pandaDepthCallback,
+ '/panda_left_camera/depth/image_raw',
+ self.pandaLeftDepthCallback,
1
)
+ self.ur5_right_rgb_ = None
+ self.ur5_right_depth_ = None
+ self.panda_right_rgb_ = None
+ self.panda_right_depth_ = None
+ self.ur5_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/image_raw',
+ self.ur5RightRgbCallback,
+ 1
+ )
+
+ self.ur5_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/ur5_right_camera/depth/image_raw',
+ self.ur5RightDepthCallback,
+ 1
+ )
+
+ self.panda_right_camera_color_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/image_raw',
+ self.pandaRightRgbCallback,
+ 1
+ )
+
+ self.panda_right_camera_depth_subscription_ = self.create_subscription(
+ Image,
+ '/panda_right_camera/depth/image_raw',
+ self.pandaRightDepthCallback,
+ 1
+ )
+
+
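
The eight camera subscriptions above each cache the latest message independently, and the callback below samples whatever arrived most recently. If the left/right Gazebo frames ever need to be sampled consistently, the `message_filters` machinery already imported in this file can pair them by timestamp; a hedged sketch (the callback name is ours):

```
from message_filters import ApproximateTimeSynchronizer, Subscriber
from sensor_msgs.msg import Image

# Inside the node's __init__ (self is the Node): pair the four UR5 camera
# streams by timestamp instead of caching the freshest message per topic.
subs = [
    Subscriber(self, Image, '/ur5_left_camera/image_raw'),
    Subscriber(self, Image, '/ur5_left_camera/depth/image_raw'),
    Subscriber(self, Image, '/ur5_right_camera/image_raw'),
    Subscriber(self, Image, '/ur5_right_camera/depth/image_raw'),
]
sync = ApproximateTimeSynchronizer(subs, queue_size=5, slop=0.05)
sync.registerCallback(self.pairedCamerasCallback)  # receives all four images
```
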
self.joint_subscription_ = self.create_subscription(
JointState,
'/gazebo_joint_states',
@@ -178,81 +214,243 @@ def __init__(self):
timer_period = 0.5
self.links_info_ = []
self.original_meshes_ = []
- for link in root.iter('link'):
- element_name1 = "visual"
- found_element1 = link.find(".//" + element_name1)
- element_name2 = "geometry"
- found_element2 = link.find(".//" + element_name2)
- element_name3 = "mesh"
- found_element3 = link.find(".//" + element_name3)
- if (found_element1 is not None) and (found_element2 is not None) and (found_element3 is not None):
- link_name = link.attrib.get('name')
- for visual in link.iter("visual"):
- origin_element = visual.find(".//origin")
- rpy_str = origin_element.attrib.get('rpy')
- xyz_str = origin_element.attrib.get('xyz')
- for geometry in visual.iter("geometry"):
- for mesh in geometry.iter("mesh"):
- filename = mesh.attrib.get('filename')[7:]
- #publisher = self.create_publisher(PointCloud2,link_name+"_pointcloud",10)
- #publisher_camera = self.create_publisher(PointCloud2,link_name+"_pointcloud_camera",10)
- #self.publishers_.append(publisher)
- #self.publishers_.append(publisher_camera)
- #subscriber = Subscriber(self,PointCloud2,link_name+"_pointcloud")
- #self.subscribers_.append(subscriber)
- mesh = self.prelimMeshFast(filename,link_name,rpy_str,xyz_str)
- self.original_meshes_.append(mesh)
- self.links_info_.append([filename,link_name,rpy_str,xyz_str])
- #timer = self.create_timer(timer_period,partial(self.debugTimerCallback,filename,link_name,publisher,publisher_camera,rpy_str,xyz_str))
- #self.timers_.append(timer)
self.full_publisher_ = self.create_publisher(PointCloud2,"full_pointcloud",1)
- self.inpainted_publisher_ = self.create_publisher(Image,"inpainted_image",1)
+ self.inpainted_publisher_ = self.create_publisher(MultipleInpaintImages,"inpainted_image",1)
#self.full_subscriber_ = self.create_subscription(PointCloud2,'full_pointcloud',self.fullPointcloudCallback,10)
- self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1)
+ self.full_mask_image_publisher_ = self.create_publisher(MultipleInpaintImages,"full_mask_image",1)
self.updated_joints_ = False
self.is_ready_ = True
- def ur5RgbCallback(self,msg):
- self.ur5_rgb_ = msg
+ def ur5LeftRgbCallback(self,msg):
+ self.ur5_left_rgb_ = msg
- def ur5DepthCallback(self,msg):
- self.ur5_depth_ = msg
+ def ur5LeftDepthCallback(self,msg):
+ self.ur5_left_depth_ = msg
- def pandaRgbCallback(self,msg):
- self.panda_rgb_ = msg
+ def pandaLeftRgbCallback(self,msg):
+ self.panda_left_rgb_ = msg
- def pandaDepthCallback(self,msg):
- self.panda_depth_ = msg
+ def pandaLeftDepthCallback(self,msg):
+ self.panda_left_depth_ = msg
- def noTimeGazeboCallback(self,joint_msg):
- start_time = time.time()
- real_rgb = self.ur5_rgb_
- real_depth = self.ur5_depth_
- gazebo_rgb = self.panda_rgb_
- gazebo_depth = self.panda_depth_
+ def ur5RightRgbCallback(self,msg):
+ self.ur5_right_rgb_ = msg
+
+ def ur5RightDepthCallback(self,msg):
+ self.ur5_right_depth_ = msg
+
+ def pandaRightRgbCallback(self,msg):
+ self.panda_right_rgb_ = msg
+
+ def pandaRightDepthCallback(self,msg):
+ self.panda_right_depth_ = msg
+
+ def doFullInpainting(self,real_rgb,real_depth,gazebo_rgb,gazebo_depth,world_rgb,world_depth):
real_rgb_np = self.cv_bridge_.imgmsg_to_cv2(real_rgb)
real_depth_np = self.cv_bridge_.imgmsg_to_cv2(real_depth)
gazebo_rgb_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_rgb)
gazebo_depth_np = self.cv_bridge_.imgmsg_to_cv2(gazebo_depth)
- cv2.imwrite('real_rgb.png',real_rgb_np)
- cv2.imwrite('real_depth.png',self.normalize_depth_image(real_depth_np))
- cv2.imwrite('gazebo_rgb.png',gazebo_rgb_np)
- cv2.imwrite('gazebo_depth.png',self.normalize_depth_image(gazebo_depth_np))
-
+
real_seg_np = (real_depth_np < 8).astype(np.uint8)
real_seg_255_np = 255 * real_seg_np
gazebo_seg_np = (gazebo_depth_np < 8).astype(np.uint8)
gazebo_seg_255_np = 255 * gazebo_seg_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
- real_depth = self.real_depth_
+ real_rgb = world_rgb
+ real_depth = world_depth
real_seg = real_seg_255_np
- gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
- self.dummyInpainting(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
- self.inpainting(real_rgb,real_depth,real_seg,gazebo_rgb_np,gazebo_seg_255_np,gazebo_depth_np)
- self.updated_joints_ = False
+ return self.dummyInpaintingEarly(real_rgb,real_seg,gazebo_rgb_np,gazebo_seg_255_np)
+
+ def dummyInpaintingEarly(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
+ rgb = self.cv_bridge_.imgmsg_to_cv2(rgb)
+ if(rgb.dtype == np.float32):
+ rgb = (rgb * 255).astype(np.uint8)
+ # rgb = cv2.resize(rgb,(128,128))
+ # seg_file = cv2.resize(seg_file,(128,128))
+ # gazebo_rgb = cv2.resize(gazebo_rgb,(128,128))
+ # gazebo_seg = cv2.resize(gazebo_seg,(128,128))
+ _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
+ _, seg_file = cv2.threshold(seg_file, 128, 255, cv2.THRESH_BINARY)
+ gazebo_segmentation_mask_255 = gazebo_seg
+ inverted_segmentation_mask_255_original = cv2.bitwise_not(seg_file)
+ cv2.imwrite('inverted_mask1.png',inverted_segmentation_mask_255_original)
+ inverted_segmentation_mask_255 = cv2.erode(inverted_segmentation_mask_255_original,np.ones((3,3),np.uint8),iterations=30)
+ gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
+ # gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab[:,:,0] += 10
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ cv2.imwrite('gazebo_robot_only.png',gazebo_only)
+ cv2.imwrite('background_only_pre1.png',rgb)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ _, inverted_segmentation_mask_255 = cv2.threshold(inverted_segmentation_mask_255, 128, 255, cv2.THRESH_BINARY)
+ cv2.imwrite('background_only_pre2.png',inverted_segmentation_mask_255)
+ background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255)
+ cv2.imwrite('background_only_pre3.png',background_only)
+ background_only = cv2.inpaint(background_only,cv2.bitwise_not(inverted_segmentation_mask_255),3,cv2.INPAINT_TELEA)
+ cv2.imwrite('background_only2.png',background_only)
+ inverted_seg_file_original = cv2.bitwise_not(seg_file)
+ inverted_seg_file = cv2.erode(inverted_seg_file_original,np.ones((3,3),np.uint8),iterations=3)
+ background_only = cv2.bitwise_and(background_only,background_only,mask=cv2.bitwise_not(gazebo_segmentation_mask_255))
+ cv2.imwrite('background_only3.png',background_only)
+ inpainted_image = gazebo_only + background_only
+ cv2.imwrite('background_only4.png',inpainted_image)
+ better_dilated_blend_mask = cv2.bitwise_not(inverted_seg_file)*cv2.bitwise_not(gazebo_seg)*255
+ diffusion_input = cv2.bitwise_and(inpainted_image,inpainted_image,mask=cv2.bitwise_not(better_dilated_blend_mask))
+ if(self.float_image_):
+ inpainted_image = (inpainted_image / 255.0).astype(np.float32)
+ diffusion_input = (diffusion_input / 255.0).astype(np.float32)
+ return inpainted_image,diffusion_input
+
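
`dummyInpaintingEarly` above is the cross-painting composite: cut the rendered Panda out of the Gazebo frame, erase the real UR5 from the camera frame by eroding the inverted robot mask (so a halo of pixels around the arm is cleared too) and Telea-inpainting the hole, then add the two layers. Stripped of the debug `imwrite` calls and the `diffusion_input` branch, the flow is (a sketch; the function name is ours):

```
import cv2
import numpy as np

def cross_paint(real_rgb, real_seg, gazebo_rgb, gazebo_seg):
    """Replace the real robot in real_rgb with the rendered robot in gazebo_rgb.

    real_seg and gazebo_seg are 0/255 masks of the two robots; all inputs are
    uint8 arrays of the same size.
    """
    _, gazebo_seg = cv2.threshold(gazebo_seg, 128, 255, cv2.THRESH_BINARY)
    _, real_seg = cv2.threshold(real_seg, 128, 255, cv2.THRESH_BINARY)
    # Keep background pixels well clear of the real robot's silhouette.
    background_mask = cv2.erode(cv2.bitwise_not(real_seg),
                                np.ones((3, 3), np.uint8), iterations=30)
    background = cv2.bitwise_and(real_rgb, real_rgb, mask=background_mask)
    # Fill the hole left by the removed robot from its surroundings.
    background = cv2.inpaint(background, cv2.bitwise_not(background_mask),
                             3, cv2.INPAINT_TELEA)
    # Clear the region where the rendered robot will sit, then composite.
    background = cv2.bitwise_and(background, background,
                                 mask=cv2.bitwise_not(gazebo_seg))
    robot = cv2.bitwise_and(gazebo_rgb, gazebo_rgb, mask=gazebo_seg)
    return robot + background
```
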
+ def reproject(self,left_real_rgb,left_real_depth,reproject_tf):
+ rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_real_rgb)
+ depth_np = left_real_depth
+ depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
+ depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
+ depth_np[np.isnan(depth_np)] = 0.0001
+ fx = self.intrinsic_matrix_[0][0]
+ fy = self.intrinsic_matrix_[1][1]
+ cx = self.intrinsic_matrix_[0][2]
+ cy = self.intrinsic_matrix_[1][2]
+ u = np.tile(np.arange(rgb_np.shape[1]), (rgb_np.shape[0], 1))
+ v = np.tile(np.arange(rgb_np.shape[0]),(rgb_np.shape[1],1)).T
+ d = depth_np
+ z = d
+ x = (u - cx) * z / fx
+ y = (v - cy) * z / fy
+ xyz_np = np.stack([x,y,z],axis=-1)
+ keys = xyz_np.reshape(-1,3)
+ values = rgb_np.reshape(-1,3)
+ data_dict = dict(zip(map(tuple,keys),map(tuple,values)))
+ points = np.array([x.reshape(-1),y.reshape(-1),z.reshape(-1)]).T
+ homogenous_points = np.column_stack((points, np.ones(len(points))))
+ transformed_points = np.dot(reproject_tf, homogenous_points.T).T[:, :3]
+ point_dict = dict(zip(map(tuple,transformed_points),map(tuple,homogenous_points)))
+ new_image_mask = np.zeros((left_real_rgb.height,left_real_rgb.width)).astype(np.uint8)
+ new_image = np.zeros((left_real_rgb.height,left_real_rgb.width,3)).astype(np.uint8)
+ new_depth = np.zeros((left_real_rgb.height,left_real_rgb.width)).astype(np.float64)
+ new_fx = fx
+ new_fy = fy
+ new_cx = cx
+ new_cy = cy
+ # Vectorized computation of coordinates
+ u_coords = np.round((new_fx * transformed_points[:, 0] / transformed_points[:, 2]) + new_cx).astype(int)
+ v_coords = np.round((new_fy * transformed_points[:, 1] / transformed_points[:, 2]) + new_cy).astype(int)
+
+ valid_coords_mask = np.logical_and.reduce([
+ (0 <= u_coords), (u_coords < left_real_rgb.width),
+ (0 <= v_coords), (v_coords < left_real_rgb.height),
+ ~np.isnan(transformed_points).any(axis=1)
+ ])
+
+ # Convert lists to tuples before using them as keys
+ point_keys = tuple(map(tuple, transformed_points[valid_coords_mask].tolist()))
+ data_values = np.array([data_dict[point_dict[key][:3]] for key in point_keys], dtype=np.uint8)
+ new_image_mask[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = 255
+ new_image[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = data_values
+ new_depth[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = transformed_points[:,2][valid_coords_mask]
+
+ #new_image_mask[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = 255
+ #new_image[v_coords[valid_coords_mask], u_coords[valid_coords_mask]] = data_dict[
+ # tuple(transformed_points[valid_coords_mask].astype(int).tolist())[:3]]
+
+ inverted_image_mask = cv2.bitwise_not(new_image_mask)
+ inpainted_image = cv2.inpaint(new_image,inverted_image_mask,1,cv2.INPAINT_TELEA)
+ cv2.imwrite('image_overlap.png',new_image_mask)
+ cv2.imwrite('image_overlap_color.png',new_image)
+ cv2.imwrite('image_overlap_inpaint.png',inpainted_image)
+ cv2.imwrite('normalized_depth_new.png',self.normalize_depth_image(new_depth))
+ return self.cv_bridge_.cv2_to_imgmsg(inpainted_image),new_depth,new_image
+
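
`reproject` above is a standard pinhole round trip: back-project every pixel into 3D with the intrinsics, transform the points by `reproject_tf`, and rasterize them into a new image plus a hole mask that Telea inpainting fills. The geometry reduces to a few vectorized lines (sketch; source and target views are assumed to share intrinsics, as in the code above):

```
import numpy as np

def reproject_rgbd(rgb, depth, K, tf):
    """Warp an RGB-D frame to a new camera pose given by the 4x4 transform tf."""
    h, w = depth.shape
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    # Back-project pixels to camera-frame 3D points (guard the depth holes).
    z = np.where(depth <= 0, 1e-4, depth)
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    pts = np.stack([x, y, z, np.ones_like(z)], axis=-1).reshape(-1, 4)
    pts = (tf @ pts.T).T[:, :3]
    # Project into the target view and keep in-bounds pixels.
    u2 = np.round(fx * pts[:, 0] / pts[:, 2] + cx).astype(int)
    v2 = np.round(fy * pts[:, 1] / pts[:, 2] + cy).astype(int)
    valid = ((0 <= u2) & (u2 < w) & (0 <= v2) & (v2 < h)
             & ~np.isnan(pts).any(axis=1))
    new_rgb = np.zeros_like(rgb)
    new_depth = np.zeros((h, w))
    new_rgb[v2[valid], u2[valid]] = rgb.reshape(-1, 3)[valid]
    new_depth[v2[valid], u2[valid]] = pts[valid, 2]
    return new_rgb, new_depth
```

The float-keyed dictionaries in the original (`data_dict`, `point_dict`) only serve to look colors back up per transformed point; indexing the flattened RGB array with the same validity mask recovers the same pixels without hashing millions of float tuples, modulo duplicate 3D points that the dictionaries silently merge.
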
+ def noTimeGazeboCallback(self,joint_msg):
+ start_time = time.time()
+ left_real_rgb = self.left_real_rgb_
+ left_real_depth = self.left_real_depth_
+ right_real_rgb = self.right_real_rgb_
+ right_real_depth = self.right_real_depth_
+ reproject_tf = np.array([[0.987,-0.136,0.083,-0.05],
+ [0.124,0.983,0.136,-0.05],
+ [-0.01,-0.124,0.987,-0.05],
+ [0,0,0,1]])
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('reproject_images'):
+ os.makedirs('reproject_images')
+ if not os.path.exists('reproject_images/raw_rgb'):
+ os.makedirs('reproject_images/raw_rgb')
+ if not os.path.exists('reproject_images/reproject_once_'):
+ os.makedirs('reproject_images/reproject_once_')
+ if not os.path.exists('reproject_images/reproject_inpaint_once_'):
+ os.makedirs('reproject_images/reproject_inpaint_once_')
+ if not os.path.exists('reproject_images/reproject_twice_'):
+ os.makedirs('reproject_images/reproject_twice_')
+ if not os.path.exists('reproject_images/reproject_inpaint_twice_'):
+ os.makedirs('reproject_images/reproject_inpaint_twice_')
+ np.savetxt('reproject_images/camera_tf.txt',reproject_tf)
+ if(self.float_image_):
+ left_real_rgb = (self.cv_bridge_.imgmsg_to_cv2(left_real_rgb) * 255).astype(np.uint8)
+ cv2.imwrite('reproject_images/raw_rgb/raw_rgb'+ str(inpaint_number) +'.png',cv2.cvtColor(left_real_rgb,cv2.COLOR_BGR2RGB))
+ left_real_rgb = self.cv_bridge_.cv2_to_imgmsg(left_real_rgb)
+ left_real_rgb,left_real_depth,left_new_image = self.reproject(left_real_rgb,left_real_depth,reproject_tf)
+
+ first_reproject_straight_up = left_new_image
+ first_reproject_inpaint = self.cv_bridge_.imgmsg_to_cv2(left_real_rgb)
+ cv2.imwrite('reproject_images/reproject_once_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(left_new_image,cv2.COLOR_BGR2RGB))
+ cv2.imwrite('reproject_images/reproject_inpaint_once_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(self.cv_bridge_.imgmsg_to_cv2(left_real_rgb),cv2.COLOR_BGR2RGB))
+
+ left_real_rgb,left_real_depth,left_new_image = self.reproject(left_real_rgb,left_real_depth,np.linalg.inv(reproject_tf))
+
+ second_reproject_straight_up = left_new_image
+ second_reproject_inpaint = self.cv_bridge_.imgmsg_to_cv2(left_real_rgb)
+ cv2.imwrite('reproject_images/reproject_twice_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(left_new_image,cv2.COLOR_BGR2RGB))
+ cv2.imwrite('reproject_images/reproject_inpaint_twice_/reproject'+ str(inpaint_number) +'.png',cv2.cvtColor(self.cv_bridge_.imgmsg_to_cv2(left_real_rgb),cv2.COLOR_BGR2RGB))
+
+ left_real_rgb = self.cv_bridge_.imgmsg_to_cv2(left_real_rgb) # COMMENT THIS OUT SOON
+ if(self.float_image_):
+ second_reproject_inpaint = (second_reproject_inpaint / 255).astype(np.float32)
+ second_reproject_straight_up = (second_reproject_straight_up / 255).astype(np.float32)
+ first_reproject_inpaint = (first_reproject_inpaint / 255).astype(np.float32)
+ first_reproject_straight_up = (first_reproject_straight_up / 255).astype(np.float32)
+
+ left_inpainted_image,left_mask = left_real_rgb,left_real_rgb
+ right_inpainted_image,right_mask = left_inpainted_image,left_mask
+ #right_real_rgb,right_real_depth = self.reproject(left_real_rgb,left_real_depth,reproject_tf)
+ #right_real_rgb,right_real_depth = self.reproject(left_real_rgb,left_real_depth,np.linalg.inv(reproject_tf))
+ #left_inpainted_image,left_mask = self.doFullInpainting(self.ur5_left_rgb_,self.ur5_left_depth_,self.panda_left_rgb_,self.panda_left_depth_,left_real_rgb,left_real_depth)
+ #right_inpainted_image, right_mask = self.doFullInpainting(self.ur5_right_rgb_,self.ur5_right_depth_,self.panda_right_rgb_,self.panda_right_depth_,right_real_rgb,right_real_depth)
+ inpainted_image_msg = MultipleInpaintImages()
+ mask_image_msg = MultipleInpaintImages()
+ inpainted_image_msg.images = [self.cv_bridge_.cv2_to_imgmsg(second_reproject_inpaint),self.cv_bridge_.cv2_to_imgmsg(second_reproject_straight_up),self.cv_bridge_.cv2_to_imgmsg(first_reproject_inpaint),self.cv_bridge_.cv2_to_imgmsg(first_reproject_straight_up)]
+ mask_image_msg.images = []
+ self.inpainted_publisher_.publish(inpainted_image_msg)
+ self.full_mask_image_publisher_.publish(mask_image_msg)
+ inpaint_number = str(self.i_).zfill(5)
+ if not os.path.exists('left_inpainting'):
+ os.makedirs('left_inpainting')
+ if not os.path.exists('right_inpainting'):
+ os.makedirs('right_inpainting')
+ if not os.path.exists('left_mask'):
+ os.makedirs('left_mask')
+ if not os.path.exists('right_mask'):
+ os.makedirs('right_mask')
+ if not os.path.exists('left_rgb_no_inpainting'):
+ os.makedirs('left_rgb_no_inpainting')
+ if not os.path.exists('right_rgb_no_inpainting'):
+ os.makedirs('right_rgb_no_inpainting')
+ if(self.float_image_):
+ left_inpainted_image = (left_inpainted_image * 255).astype(np.uint8)
+ right_inpainted_image = (right_inpainted_image * 255).astype(np.uint8)
+ left_mask = (left_mask * 255).astype(np.uint8)
+ right_mask = (right_mask * 255).astype(np.uint8)
+ left_new_image = (left_new_image * 255).astype(np.uint8)
+ cv2.imwrite('left_inpainting/inpaint'+ str(inpaint_number) +'.png',left_inpainted_image)
+ cv2.imwrite('right_inpainting/inpaint'+ str(inpaint_number) +'.png',right_inpainted_image)
+ cv2.imwrite('left_mask/mask'+ str(inpaint_number) +'.png',left_mask)
+ cv2.imwrite('right_mask/mask'+ str(inpaint_number) +'.png',right_mask)
+ cv2.imwrite('left_rgb_no_inpainting/rgb'+ str(inpaint_number) +'.png',left_new_image)
end_time = time.time()
print("Algo time Part 3: " + str(end_time - start_time) + " seconds")
return
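
The exists-then-create ladders in this callback (and again below) can each collapse to one call per directory: `os.makedirs` creates missing parents and, with `exist_ok=True`, is a no-op (and race-free) when the directory already exists:

```
import os

for d in ('reproject_images/raw_rgb',
          'reproject_images/reproject_once_',
          'reproject_images/reproject_inpaint_once_',
          'reproject_images/reproject_twice_',
          'reproject_images/reproject_inpaint_twice_'):
    os.makedirs(d, exist_ok=True)  # parents included, no error if present
```
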
@@ -276,7 +474,7 @@ def gazeboCallback(self,joints,real_rgb,real_depth,gazebo_rgb,gazebo_depth,):
gazebo_seg_255_np = 255 * gazebo_seg_np
cv2.imwrite('real_seg.png',real_seg_255_np)
cv2.imwrite('gazebo_seg.png',gazebo_seg_255_np)
- real_rgb = self.real_rgb_
+        real_rgb = self.left_real_rgb_
-        real_depth = self.real_depth_
+        real_depth = self.left_real_depth_
real_seg = real_seg_255_np
gazebo_rgb_np = cv2.cvtColor(gazebo_rgb_np,cv2.COLOR_BGR2RGB)
@@ -491,10 +689,10 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
outline_mask = abs(inverted_segmentation_mask_255 - inverted_segmentation_mask_255_original)*255
gazebo_only = cv2.bitwise_and(gazebo_rgb,gazebo_rgb,mask=gazebo_segmentation_mask_255)
# gazebo_only = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2RGB)
- gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
+ #gazebo_robot_only_lab = cv2.cvtColor(gazebo_only,cv2.COLOR_BGR2LAB)
#gazebo_robot_only_lab[:,:,0] += 10
- gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
- gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
+ #gazebo_robot_only_lab[:,:,0] = np.where(gazebo_segmentation_mask_255 > 0, gazebo_robot_only_lab[:,:,0] + 150, gazebo_robot_only_lab[:,:,0])
+ #gazebo_only = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR)
cv2.imwrite('gazebo_robot_only.png',gazebo_only)
background_only = cv2.bitwise_and(rgb,rgb,mask=inverted_segmentation_mask_255_original)
inverted_seg_file_original = cv2.bitwise_not(seg_file)
@@ -513,6 +711,7 @@ def dummyInpainting(self,rgb,seg_file,gazebo_rgb,gazebo_seg):
target_mask = np.zeros_like(inpainted_image)
target_mask[:,:] = target_color
inpainted_image = np.where(better_dilated_blend_mask[:,:,None] != 0,target_mask,inpainted_image).astype(np.uint8)
+ return self.cv_bridge_.cv2_to_imgmsg(inpainted_image,encoding="bgr8")
# inpainted_image = cv2_inpaint_image
inpaint_number = str(self.i_).zfill(5)
if not os.path.exists('inpainting'):
@@ -1013,28 +1212,41 @@ def listenerCallback(self,msg):
def listenerCallbackOnlineDebug(self,msg):
start_time = time.time()
- self.i_ += 1
- online_input_folder = 'offline_ur5e_input'
- if not os.path.exists(online_input_folder):
- os.makedirs(online_input_folder)
- online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
- if not os.path.exists(online_input_num_folder):
- os.makedirs(online_input_num_folder)
-
- rgb_np = self.cv_bridge_.imgmsg_to_cv2(msg.rgb)
+ if(len(msg.data_pieces) == 2):
+ self.i_ += 1
+ online_input_folder = 'offline_ur5e_input'
+ if not os.path.exists(online_input_folder):
+ os.makedirs(online_input_folder)
+ online_input_num_folder = online_input_folder + '/offline_ur5e_' + str(int(self.i_ / 2))
+ if not os.path.exists(online_input_num_folder):
+ os.makedirs(online_input_num_folder)
+ left_msg = msg.data_pieces[0]
+ right_msg = msg.data_pieces[1]
+ elif(len(msg.data_pieces) == 1):
+ left_msg = msg.data_pieces[0]
+ left_rgb_np = self.cv_bridge_.imgmsg_to_cv2(left_msg.rgb)
+ if(left_rgb_np.dtype == np.float32):
+ self.float_image_ = True
+ left_rgb_np = (left_rgb_np * 255).astype(np.uint8)
# rgb_np = np.array(msg.rgb,dtype=np.uint8).reshape((msg.segmentation.width,msg.segmentation.height,3))
- cv2.imwrite(online_input_num_folder+'/rgb.png',rgb_np)
- depth_np = np.array(msg.depth_map,dtype=np.float64).reshape((msg.rgb.height,msg.rgb.width))
- np.save(online_input_num_folder+'/depth.npy',depth_np)
- cv2.imwrite('inf_mask.png',np.isinf(depth_np).astype(np.uint8)*255)
- cv2.imwrite('nan_mask.png',np.isnan(depth_np).astype(np.uint8)*255)
- depth_np[np.isinf(depth_np) & (depth_np < 0)] = 0.01
- depth_np[np.isinf(depth_np) & (depth_np > 0)] = 10
- depth_np[np.isnan(depth_np)] = 0
- cv2.imwrite('og_depth.png',self.normalize_depth_image(depth_np))
- ur5_joints = np.array(msg.joints)
+ left_depth_np = np.array(left_msg.depth_map,dtype=np.float64).reshape((left_msg.rgb.height,left_msg.rgb.width))
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np < 0)] = 0.01
+ left_depth_np[np.isinf(left_depth_np) & (left_depth_np > 0)] = 10
+ left_depth_np[np.isnan(left_depth_np)] = 0
+
+ # right_rgb_np = self.cv_bridge_.imgmsg_to_cv2(right_msg.rgb)
+ # if(right_rgb_np.dtype == np.float32):
+ # self.float_image_ = True
+ # right_rgb_np = (right_rgb_np * 255).astype(np.uint8)
+ # right_depth_np = np.array(right_msg.depth_map,dtype=np.float64).reshape((right_msg.rgb.height,right_msg.rgb.width))
+ # right_depth_np[np.isinf(right_depth_np) & (right_depth_np < 0)] = 0.01
+ # right_depth_np[np.isinf(right_depth_np) & (right_depth_np > 0)] = 10
+ # right_depth_np[np.isnan(right_depth_np)] = 0
+
+ ur5_joints = np.array(left_msg.joints)
+ ur5_joints = ur5_joints[:-1]
ur5_joints[0] += math.pi
- ee_pose = self.ur5_solver_.fk(ur5_joints)
+        ee_pose = self.ur5_solver_.fk(ur5_joints)
end_effector_rotation_with_no_translation = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
ee_pose = ee_pose @ end_effector_rotation_with_no_translation
scipy_rotation = R.from_matrix(ee_pose[:3,:3])
@@ -1060,8 +1272,11 @@ def listenerCallbackOnlineDebug(self,msg):
qout_msg = Float64MultiArray()
qout_msg.data = qout_list
self.ur5_and_panda_joint_command_publisher_.publish(qout_msg)
- self.real_rgb_ = msg.rgb
- self.real_depth_ = depth_np
+ self.left_real_rgb_ = left_msg.rgb
+ # self.right_real_rgb_ = right_msg.rgb
+
+ self.left_real_depth_ = left_depth_np
+ #self.right_real_depth_ = right_depth_np
self.real_qout_list_ = qout_list
end_time = time.time()
print("Algo time Part 1: " + str(end_time - start_time) + " seconds")
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_gripper_control_plugin/ur5_and_panda_gripper_control_plugin.cc b/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_gripper_early_inpaint_control_plugin/ur5_and_panda_gripper_early_inpaint_control_plugin.cc
similarity index 79%
rename from mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_gripper_control_plugin/ur5_and_panda_gripper_control_plugin.cc
rename to mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_gripper_early_inpaint_control_plugin/ur5_and_panda_gripper_early_inpaint_control_plugin.cc
index 5917d2f..fb1f09d 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_gripper_control_plugin/ur5_and_panda_gripper_control_plugin.cc
+++ b/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_gripper_early_inpaint_control_plugin/ur5_and_panda_gripper_early_inpaint_control_plugin.cc
@@ -1,5 +1,5 @@
-#ifndef _UR5PANDANOGRIPPERCONTROL_PLUGIN_HH_
-#define _UR5PANDANOGRIPPERCONTROL_PLUGIN_HH_
+#ifndef _UR5PANDANOGRIPPEREARLYINPAINTCONTROL_PLUGIN_HH_
+#define _UR5PANDANOGRIPPEREARLYINPAINTCONTROL_PLUGIN_HH_
#include
#include
@@ -18,10 +18,10 @@
namespace gazebo
{
/// \brief A plugin to control a NoPhysics sensor.
- class Ur5PandaGripperControlPlugin : public ModelPlugin
+ class Ur5PandaGripperEarlyInpaintControlPlugin : public ModelPlugin
{
/// \brief Constructor
- public: Ur5PandaGripperControlPlugin() {}
+ public: Ur5PandaGripperEarlyInpaintControlPlugin() {}
/// \brief The load function is called by Gazebo when the plugin is
/// inserted into simulation
@@ -35,11 +35,11 @@ namespace gazebo
this->model_name_ = _model->GetName();
const gazebo_ros::QoS &qos = this->node_->get_qos();
// Just output a message for now
- std::cerr << "KUSHTIMUS PRIME PANDA AND UR5 WITH GRIPPER" << "\n";
+ std::cerr << "KUSHTIMUS PRIME PANDA AND UR5 WITH GRIPPER EARLY INPAINT" << "\n";
  this->robot_subscriber_ = this->node_->create_subscription<std_msgs::msg::Float64MultiArray>(
"joint_commands",
qos.get_subscription_qos("joint_commands", rclcpp::QoS(1)),
- std::bind(&Ur5PandaGripperControlPlugin::jointCommandMsg, this, std::placeholders::_1));
+ std::bind(&Ur5PandaGripperEarlyInpaintControlPlugin::jointCommandMsg, this, std::placeholders::_1));
  this->gazebo_joint_state_publisher_ = this->node_->create_publisher<sensor_msgs::msg::JointState>("/gazebo_joint_states",1);
}
@@ -76,6 +76,13 @@ namespace gazebo
auto left_finger = model_ptr->GetJoint("robotiq_85_left_finger_tip_joint");
auto right_finger = model_ptr->GetJoint("robotiq_85_right_finger_tip_joint");
+ auto ur5_no_gripper_joint1 = model_ptr->GetJoint("ur5_no_gripper_shoulder_pan_joint");
+ auto ur5_no_gripper_joint2 = model_ptr->GetJoint("ur5_no_gripper_shoulder_lift_joint");
+ auto ur5_no_gripper_joint3 = model_ptr->GetJoint("ur5_no_gripper_elbow_joint");
+ auto ur5_no_gripper_joint4 = model_ptr->GetJoint("ur5_no_gripper_wrist_1_joint");
+ auto ur5_no_gripper_joint5 = model_ptr->GetJoint("ur5_no_gripper_wrist_2_joint");
+ auto ur5_no_gripper_joint6 = model_ptr->GetJoint("ur5_no_gripper_wrist_3_joint");
+
// Needs to coordinate with custon_joint_state_publisher_node.py
ur5_joint1->SetPosition(0,msg->data[0]);
ur5_joint2->SetPosition(0,msg->data[1]);
@@ -99,11 +106,18 @@ namespace gazebo
panda_joint7->SetPosition(0,msg->data[18]);
panda_finger_joint1->SetPosition(0,msg->data[19]);
panda_finger_joint2->SetPosition(0,msg->data[20]);
+
+ ur5_no_gripper_joint1->SetPosition(0,msg->data[0]);
+ ur5_no_gripper_joint2->SetPosition(0,msg->data[1]);
+ ur5_no_gripper_joint3->SetPosition(0,msg->data[2]);
+ ur5_no_gripper_joint4->SetPosition(0,msg->data[3]);
+ ur5_no_gripper_joint5->SetPosition(0,msg->data[4]);
+ ur5_no_gripper_joint6->SetPosition(0,msg->data[5]);
usleep(200000);
auto message = sensor_msgs::msg::JointState();
message.header.stamp = this->node_->get_clock() ->now();
- message.name = {"shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint","panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"};
- message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12],msg->data[13],msg->data[14],msg->data[15],msg->data[16],msg->data[17],msg->data[18],msg->data[19],msg->data[20]};
+ message.name = {"shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","robotiq_85_left_knuckle_joint","robotiq_85_right_knuckle_joint","robotiq_85_left_inner_knuckle_joint","robotiq_85_right_inner_knuckle_joint","robotiq_85_left_finger_tip_joint","robotiq_85_right_finger_tip_joint","panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2","ur5_no_gripper_shoulder_pan_joint","ur5_no_gripper_shoulder_lift_joint","ur5_no_gripper_elbow_joint","ur5_no_gripper_wrist_1_joint","ur5_no_gripper_wrist_2_joint","ur5_no_gripper_wrist_3_joint"};
+ message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12],msg->data[13],msg->data[14],msg->data[15],msg->data[16],msg->data[17],msg->data[18],msg->data[19],msg->data[20],msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5]};
//Delay for image to catch up to joint position update
this->gazebo_joint_state_publisher_->publish(message);
@@ -128,6 +142,6 @@ namespace gazebo
};
// Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin.
- GZ_REGISTER_MODEL_PLUGIN(Ur5PandaGripperControlPlugin)
+ GZ_REGISTER_MODEL_PLUGIN(Ur5PandaGripperEarlyInpaintControlPlugin)
}
#endif
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_no_gripper_control_plugin/ur5_and_panda_no_gripper_control_plugin.cc b/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_no_gripper_early_inpaint_control_plugin/ur5_and_panda_no_gripper_early_inpaint_control_plugin.cc
similarity index 74%
rename from mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_no_gripper_control_plugin/ur5_and_panda_no_gripper_control_plugin.cc
rename to mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_no_gripper_early_inpaint_control_plugin/ur5_and_panda_no_gripper_early_inpaint_control_plugin.cc
index 9dd1333..951a9c4 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_no_gripper_control_plugin/ur5_and_panda_no_gripper_control_plugin.cc
+++ b/mirage/mirage/ros_ws/src/gazebo_env/ur5_and_panda_no_gripper_early_inpaint_control_plugin/ur5_and_panda_no_gripper_early_inpaint_control_plugin.cc
@@ -1,5 +1,5 @@
-#ifndef _UR5PANDANOGRIPPERCONTROL_PLUGIN_HH_
-#define _UR5PANDANOGRIPPERCONTROL_PLUGIN_HH_
+#ifndef _UR5PANDANOGRIPPEREARLYINPAINTCONTROL_PLUGIN_HH_
+#define _UR5PANDANOGRIPPEREARLYINPAINTCONTROL_PLUGIN_HH_
#include
#include
@@ -18,10 +18,10 @@
namespace gazebo
{
/// \brief A plugin to control a NoPhysics sensor.
- class Ur5PandaNoGripperControlPlugin : public ModelPlugin
+ class Ur5PandaNoGripperEarlyInpaintControlPlugin : public ModelPlugin
{
/// \brief Constructor
- public: Ur5PandaNoGripperControlPlugin() {}
+ public: Ur5PandaNoGripperEarlyInpaintControlPlugin() {}
/// \brief The load function is called by Gazebo when the plugin is
/// inserted into simulation
@@ -35,11 +35,11 @@ namespace gazebo
this->model_name_ = _model->GetName();
const gazebo_ros::QoS &qos = this->node_->get_qos();
// Just output a message for now
- std::cerr << "KUSHTIMUS PRIME PANDA AND UR5 NO GRIPPER" << "\n";
+ std::cerr << "KUSHTIMUS PRIME PANDA AND UR5 NO GRIPPER EARLY INPAINT" << "\n";
  this->robot_subscriber_ = this->node_->create_subscription<std_msgs::msg::Float64MultiArray>(
"joint_commands",
qos.get_subscription_qos("joint_commands", rclcpp::QoS(1)),
- std::bind(&Ur5PandaNoGripperControlPlugin::jointCommandMsg, this, std::placeholders::_1));
+ std::bind(&Ur5PandaNoGripperEarlyInpaintControlPlugin::jointCommandMsg, this, std::placeholders::_1));
  this->gazebo_joint_state_publisher_ = this->node_->create_publisher<sensor_msgs::msg::JointState>("/gazebo_joint_states",1);
}
@@ -68,6 +68,14 @@ namespace gazebo
auto ur5_joint5 = model_ptr->GetJoint("wrist_2_joint");
auto ur5_joint6 = model_ptr->GetJoint("wrist_3_joint");
+ auto panda_no_gripper_joint1 = model_ptr->GetJoint("panda_no_gripper_joint1");
+ auto panda_no_gripper_joint2 = model_ptr->GetJoint("panda_no_gripper_joint2");
+ auto panda_no_gripper_joint3 = model_ptr->GetJoint("panda_no_gripper_joint3");
+ auto panda_no_gripper_joint4 = model_ptr->GetJoint("panda_no_gripper_joint4");
+ auto panda_no_gripper_joint5 = model_ptr->GetJoint("panda_no_gripper_joint5");
+ auto panda_no_gripper_joint6 = model_ptr->GetJoint("panda_no_gripper_joint6");
+ auto panda_no_gripper_joint7 = model_ptr->GetJoint("panda_no_gripper_joint7");
+
// Needs to coordinate with custon_joint_state_publisher_node.py
ur5_joint1->SetPosition(0,msg->data[0]);
ur5_joint2->SetPosition(0,msg->data[1]);
@@ -83,11 +91,19 @@ namespace gazebo
panda_joint5->SetPosition(0,msg->data[10]);
panda_joint6->SetPosition(0,msg->data[11]);
panda_joint7->SetPosition(0,msg->data[12]);
+
+ panda_no_gripper_joint1->SetPosition(0,msg->data[6]);
+ panda_no_gripper_joint2->SetPosition(0,msg->data[7]);
+ panda_no_gripper_joint3->SetPosition(0,msg->data[8]);
+ panda_no_gripper_joint4->SetPosition(0,msg->data[9]);
+ panda_no_gripper_joint5->SetPosition(0,msg->data[10]);
+ panda_no_gripper_joint6->SetPosition(0,msg->data[11]);
+ panda_no_gripper_joint7->SetPosition(0,msg->data[12]);
usleep(200000);
auto message = sensor_msgs::msg::JointState();
message.header.stamp = this->node_->get_clock() ->now();
- message.name = {"shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2"};
- message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12]};
+ message.name = {"shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint","wrist_1_joint", "wrist_2_joint", "wrist_3_joint","panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7","panda_finger_joint1","panda_finger_joint2","panda_no_gripper_joint1","panda_no_gripper_joint2","panda_no_gripper_joint3","panda_no_gripper_joint4","panda_no_gripper_joint5","panda_no_gripper_joint6","panda_no_gripper_joint7"};
+ message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12],msg->data[7],msg->data[8],msg->data[9],msg->data[10],msg->data[11],msg->data[12]};
//Delay for image to catch up to joint position update
this->gazebo_joint_state_publisher_->publish(message);
@@ -112,6 +128,6 @@ namespace gazebo
};
// Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin.
- GZ_REGISTER_MODEL_PLUGIN(Ur5PandaNoGripperControlPlugin)
+ GZ_REGISTER_MODEL_PLUGIN(Ur5PandaNoGripperEarlyInpaintControlPlugin)
}
#endif
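
Both early-inpaint plugins assume a fixed layout for the `/joint_commands` `Float64MultiArray` published by the writer node. For this no-gripper pair, indices 0-5 are the UR5 arm and 6-12 the Panda arm; the plugin mirrors indices 6-12 onto the `panda_no_gripper_*` shadow joints so the masked render tracks the visible robot. (The gripper variant does the reverse with its 21-value layout, mirroring indices 0-5 onto `ur5_no_gripper_*`.) On the Python side the vector is assembled as in this sketch (the publisher and joint arrays stand in for the node's members):

```
from std_msgs.msg import Float64MultiArray

# Layout consumed by Ur5PandaNoGripperEarlyInpaintControlPlugin:
#   data[0:6]  -> UR5 shoulder_pan ... wrist_3
#   data[6:13] -> panda_joint1 ... panda_joint7 (mirrored to panda_no_gripper_*)
msg = Float64MultiArray()
msg.data = list(ur5_joints) + list(panda_joints)  # 6 + 7 = 13 values
joint_command_publisher.publish(msg)
```
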
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/image_diff.py b/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/image_diff.py
deleted file mode 100644
index 21b80b4..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/image_diff.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import cv2
-
-image1 = cv2.imread('gazebo_img.jpg')
-image2 = cv2.imread('img.jpg')
-image_difference = cv2.absdiff(image1, image2)
-
- # Save the difference image
-cv2.imwrite('image_difference.png', image_difference)
-
-# import cv2
-# import numpy as np
-
-# # Load the two black and white images (replace with your image paths)
-# image1 = cv2.imread('image1.png', cv2.IMREAD_GRAYSCALE)
-# image2 = cv2.imread('image2.png', cv2.IMREAD_GRAYSCALE)
-
-# # Create a blank blue channel (all zeros)
-# blue_channel = np.zeros_like(image1)
-
-# # Create an RGB image by combining the two images on the red and green channels
-# composite_image = cv2.merge((image2, image1, blue_channel))
-
-# # Save or display the composite image
-# cv2.imwrite('composite_image.png', composite_image)
-
-# # Display the composite image
-# cv2.imshow('Composite Image', composite_image)
-# cv2.waitKey(0)
-# cv2.destroyAllWindows()
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/image_normalization.py b/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/image_normalization.py
deleted file mode 100644
index 90b7822..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/image_normalization.py
+++ /dev/null
@@ -1,156 +0,0 @@
-import cv2
-import numpy as np
-
-# This function will return us the image stats
-# We input an image in the L*a*b* color space and it returns
-# a tuple of mean and std for L*, a* and b* respectively.
-def image_stats(image):
- # Compute the mean and standard deviation of each channel
- (l, a, b) = cv2.split(image)
- (l_mean, l_std) = (l.mean(), l.std())
- (a_mean, a_std) = (a.mean(), a.std())
- (b_mean, b_std) = (b.mean(), b.std())
-
- # return the color statistics
- return (l_mean, l_std, a_mean, a_std, b_mean, b_std)
-
-
-# This function will perform color transfer from one input image (source)
-# onto another input image (destination)
-def color_transfer(source, destination):
- # Convert the images from the RGB to L*a*b* color space, being
- # sure to utilizing the floating point data type (note: OpenCV
- # expects floats to be 32-bit, so use that instead of 64-bit)
- source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
- destination = cv2.cvtColor(destination, cv2.COLOR_BGR2LAB).astype("float32")
-
- # Compute color statistics for the source and destination images
- (l_mean_src, l_std_src, a_mean_src, a_std_src, b_mean_src, b_std_src) = image_stats(source)
- (l_mean_dest, l_std_dest, a_mean_dest, a_std_dest, b_mean_dest, b_std_dest) = image_stats(destination)
-
- # Subtract the means from the destination image
- (l, a, b) = cv2.split(destination)
- l -= l_mean_dest
- a -= a_mean_dest
- b -= b_mean_dest
-
- # Scale by the standard deviations
- l = (l_std_dest / l_std_src) * l
- a = (a_std_dest / a_std_src) * a
- b = (b_std_dest / b_std_src) * b
-
- # Add in the source mean
- l += l_mean_src
- a += a_mean_src
- b += b_mean_src
-
- # Clip the pixel intensities to [0, 255] if they fall outside this range
- l = np.clip(l, 0, 255)
- a = np.clip(a, 0, 255)
- b = np.clip(b, 0, 255)
-
- # Merge the channels together and convert back to the RGB color space,
- # being sure to utilize the 8-bit unsigned integer data type.
- transfer = cv2.merge([l, a, b])
- transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
-
- # Return the color transferred image
- return transfer
-
-# Load the original image and the segmentation gazebo_mask
-gazebo_original_image = cv2.imread('gazebo_img.jpg')
-gazebo_mask = cv2.imread('gazebo_mask.jpg', cv2.IMREAD_GRAYSCALE)
-_, gazebo_mask = cv2.threshold(gazebo_mask, 128, 255, cv2.THRESH_BINARY)
-# import pdb
-# pdb.set_trace()
-# gazebo_mask = np.expand_dims(gazebo_mask, axis=-1)
-# result = gazebo_original_image * gazebo_mask
-# result = result.squeeze()
-# import pdb
-# pdb.set_trace()
-# # Ensure both the image and the gazebo_mask have the same dimensions
-gazebo_mask = cv2.resize(gazebo_mask, (gazebo_original_image.shape[1], gazebo_original_image.shape[0]))
-
-# Create a new image with the same size as the original image
-gazebo_masked_image = np.zeros_like(gazebo_original_image)
-
-# Apply the gazebo_mask to the original image using element-wise multiplication
-gazebo_masked_image = cv2.bitwise_and(gazebo_original_image, gazebo_original_image, mask=gazebo_mask)
-
-# Save or display the resulting gazebo_masked image
-cv2.imwrite('gazebo_masked_image.jpg', gazebo_masked_image) # Save the gazebo_masked image
-
-# Load the original image and the segmentation gazebo_mask
-isaac_original_image = cv2.imread('img.jpg')
-isaac_mask = cv2.imread('seg.jpg', cv2.IMREAD_GRAYSCALE)
-_, isaac_mask = cv2.threshold(isaac_mask, 128, 255, cv2.THRESH_BINARY)
-# import pdb
-# pdb.set_trace()
-# gazebo_mask = np.expand_dims(gazebo_mask, axis=-1)
-# result = gazebo_original_image * gazebo_mask
-# result = result.squeeze()
-# import pdb
-# pdb.set_trace()
-# # Ensure both the image and the gazebo_mask have the same dimensions
-isaac_mask = cv2.resize(isaac_mask, (isaac_original_image.shape[1], isaac_original_image.shape[0]))
-
-# Create a new image with the same size as the original image
-isaac_masked_image = np.zeros_like(isaac_original_image)
-
-# Apply the gazebo_mask to the original image using element-wise multiplication
-isaac_masked_image = cv2.bitwise_and(isaac_original_image, isaac_original_image, mask=isaac_mask)
-
-# Save or display the resulting gazebo_masked image
-cv2.imwrite('isaac_masked_image.jpg', isaac_masked_image) # Save the gazebo_masked image
-
-source_image = isaac_masked_image
-target_image = gazebo_masked_image
-
-# Convert source and target images to LAB color space
-source_lab = cv2.cvtColor(source_image, cv2.COLOR_BGR2LAB)
-target_lab = cv2.cvtColor(target_image, cv2.COLOR_BGR2LAB)
-
-# Extract the luminance channel (L) from LAB color space
-source_luminance = source_lab[:,:,0]
-target_luminance = target_lab[:,:,0]
-
-# Perform histogram matching on the luminance channel
-clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
-matched_luminance = clahe.apply(source_luminance)
-
-# Replace the luminance channel in the source LAB image with the matched luminance
-target_lab[:,:,0] = matched_luminance
-
-# Convert the modified LAB image back to BGR color space
-matched_image = cv2.cvtColor(target_lab, cv2.COLOR_LAB2BGR)
-
-cv2.imwrite('output.jpg', cv2.absdiff(isaac_masked_image,gazebo_masked_image))
-
-# Load the original image and the segmentation gazebo_mask
-second_gazebo_original_image = cv2.imread('second_image.jpg')
-second_gazebo_mask = cv2.imread('second_image_mask.jpg', cv2.IMREAD_GRAYSCALE)
-_, second_gazebo_mask = cv2.threshold(second_gazebo_mask, 128, 255, cv2.THRESH_BINARY)
-# import pdb
-# pdb.set_trace()
-# gazebo_mask = np.expand_dims(gazebo_mask, axis=-1)
-# result = gazebo_original_image * gazebo_mask
-# result = result.squeeze()
-# import pdb
-# pdb.set_trace()
-# # Ensure both the image and the gazebo_mask have the same dimensions
-second_gazebo_mask = cv2.resize(second_gazebo_mask, (second_gazebo_original_image.shape[1], second_gazebo_original_image.shape[0]))
-
-# Create a new image with the same size as the original image
-second_gazebo_masked_image = np.zeros_like(second_gazebo_original_image)
-
-# Apply the gazebo_mask to the original image using element-wise multiplication
-second_gazebo_masked_image = cv2.bitwise_and(second_gazebo_original_image, second_gazebo_original_image, mask=second_gazebo_mask)
-cv2.imwrite('second_try.jpg',second_gazebo_masked_image)
-target2_image = second_gazebo_masked_image
-target2_lab = cv2.cvtColor(target2_image, cv2.COLOR_BGR2LAB)
-target2_luminance = target2_lab[:,:,0]
-matched2_luminance = clahe.apply(source_luminance)
-target2_lab[:,:,0] = matched2_luminance
-matched2_image = cv2.cvtColor(target2_lab, cv2.COLOR_LAB2BGR)
-
-cv2.imwrite('second_try_luminance.jpg', matched2_image)
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/visualize_pcd.py b/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/visualize_pcd.py
deleted file mode 100644
index a5d1bc7..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/visualize_pcd.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import open3d as o3d
-import numpy as np
-# Load a PCD file
-pcd = o3d.io.read_point_cloud("point_cloud.pcd") # Replace with the path to your PCD file
-gazebo_pcd = o3d.io.read_point_cloud("gazebo_pointcloud.pcd")
-mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
-#pcd.rotate(gazebo_rotation,[0,0,0])
-# Visualize the point cloud
-
-o3d.visualization.draw_geometries([pcd,gazebo_pcd,mesh_coordinate_frame])
-
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/warp.py b/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/warp.py
deleted file mode 100644
index 61283fd..0000000
--- a/mirage/mirage/ros_ws/src/gazebo_env/ur5_only_point_cloud/warp.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import cv2
-import numpy as np
-
-# Load the two binary masks (white object on black background)
-mask1 = cv2.imread('gazebo_mask.jpg', cv2.IMREAD_GRAYSCALE)
-mask2 = cv2.imread('seg.jpg', cv2.IMREAD_GRAYSCALE)
-
-# Find contours in the thresholded image
-contours, _ = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
-# Merge all individual contours into a single contour
-# Merge all individual contours into a single contour
-combined_contour = np.concatenate(contours)
-# Create an image with the combined contour
-image_with_combined_contour = cv2.cvtColor(mask1, cv2.COLOR_GRAY2BGR)
-cv2.drawContours(image_with_combined_contour, [combined_contour], -1, (0, 0, 255), 2)
-
-# Display the image with the combined contour
-cv2.imshow('Image with Combined Contour', image_with_combined_contour)
-
-# Wait for a key press and then close the window
-cv2.waitKey(0)
-cv2.destroyAllWindows()
-
-# Find the binary difference between the two masks
-diff_mask = cv2.absdiff(mask1, mask2)
-cv2.imwrite('diff_mask.png',diff_mask)
-# Use contour detection to find the object in the difference mask
-contours, _ = cv2.findContours(diff_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-# Assuming only one object is detected, calculate its centroid
-if len(contours) == 1:
- M = cv2.moments(contours[0])
- cx = int(M['m10'] / M['m00'])
- cy = int(M['m01'] / M['m00'])
-
- # Calculate translation to align mask2 with mask1
- translation_matrix = np.array([[1, 0, mask1.shape[1] // 2 - cx], [0, 1, mask1.shape[0] // 2 - cy]], dtype=np.float32)
-
- # Calculate rotation angle using Hough Line Transform
- lines = cv2.HoughLines(diff_mask, 1, np.pi / 180, 200)
- if lines is not None:
- for rho, theta in lines[0]:
- rotation_angle = (theta - np.pi / 2) * 180 / np.pi # Convert to degrees
- break
- else:
- rotation_angle = 0 # Default to no rotation
-
- # Calculate scaling factor based on object sizes
- object1_size = cv2.countNonZero(mask1)
- object2_size = cv2.countNonZero(mask2)
- scaling_factor = object1_size / object2_size
-
- # Apply similarity transformation to align mask2 with mask1
- similarity_matrix = cv2.getRotationMatrix2D((mask2.shape[1] // 2, mask2.shape[0] // 2), rotation_angle, scaling_factor)
- aligned_mask2 = cv2.warpAffine(mask2, similarity_matrix, (mask2.shape[1], mask2.shape[0]))
- aligned_mask2 = cv2.warpAffine(aligned_mask2, translation_matrix, (mask1.shape[1], mask1.shape[0]))
-
- # Save or display the aligned mask2
- cv2.imwrite('aligned_mask2.png', aligned_mask2)
-else:
- print("Multiple or no objects detected in the difference mask.")
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world b/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world
index 29ead2e..604c040 100644
--- a/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world
+++ b/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world
@@ -32,7 +32,7 @@
-      <pose>19.45 0.0 10.651246 0.0 1.57 0</pose>
+      <pose>14.560474 0.0 10.651246 0.0 1.57 0</pose>
       <diffuse>0.5 0.5 0.5 1</diffuse>
       <specular>0.1 0.1 0.1 1</specular>
       <direction>0 0 -1</direction>
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_wrist.world b/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_wrist.world
new file mode 100644
index 0000000..031431f
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_wrist.world
@@ -0,0 +1,108 @@
+
+
+
+
+
+ model://ground_plane
+
+
+ 0.5 0.5 0.5 1
+ 0
+
+
+ 10 0 20 0 0 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+ -9.389126 -0.047634 10.268400 -3.14 1.57 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+
+ 14.6959 5.3710 10.651246 -0.7809 1.57 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+
+
+ 0 0 13.89 0 0 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+ -1.6959 -2.3710 10.651246 2.3591 1.57 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+
diff --git a/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_wrist_dark_ur5.world b/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_wrist_dark_ur5.world
new file mode 100644
index 0000000..004022f
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/gazebo_env/worlds/no_shadow_wrist_dark_ur5.world
@@ -0,0 +1,90 @@
+
+
+
+
+
+ model://ground_plane
+
+
+ 0.5 0.5 0.5 1
+ 0
+
+
+ 10 0 20 0 0 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+
+ 14.6959 5.3710 10.651246 -0.7809 1.57 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+
+
+ 0 0 13.89 0 0 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+ 4.6959 5.3710 10.651246 -0.7809 1.57 0
+ 0.5 0.5 0.5 1
+ 0.1 0.1 0.1 1
+ 0 0 -1
+
+ 20
+ 0.5
+ 0.01
+ 0.001
+
+ 0
+
+ 0.6
+ 1
+ 1
+
+
+
+
diff --git a/mirage/mirage/ros_ws/src/robotiq_controllers/CHANGELOG.rst b/mirage/mirage/ros_ws/src/robotiq_controllers/CHANGELOG.rst
deleted file mode 100644
index 467d0a3..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_controllers/CHANGELOG.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package robotiq_controllers
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.0.1 (2023-07-17)
-------------------
-* Initial ROS 2 release of robotiq_controllers
- * This package is not supported by Robotiq but is being maintained by PickNik Robotics
-* Contributors: Alex Moriarty, Cory Crean
diff --git a/mirage/mirage/ros_ws/src/robotiq_controllers/controller_plugins.xml b/mirage/mirage/ros_ws/src/robotiq_controllers/controller_plugins.xml
deleted file mode 100644
index 9c5c8f5..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_controllers/controller_plugins.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<library path="robotiq_controllers">
-  <class name="robotiq_controllers/RobotiqActivationController" type="robotiq_controllers::RobotiqActivationController" base_class_type="controller_interface::ControllerInterface">
-    <description>
-      This controller provides an interface to (re-)activate the Robotiq gripper.
-    </description>
-  </class>
-</library>
diff --git a/mirage/mirage/ros_ws/src/robotiq_controllers/include/robotiq_controllers/robotiq_activation_controller.hpp b/mirage/mirage/ros_ws/src/robotiq_controllers/include/robotiq_controllers/robotiq_activation_controller.hpp
deleted file mode 100644
index f922400..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_controllers/include/robotiq_controllers/robotiq_activation_controller.hpp
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2022 PickNik, Inc.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// * Neither the name of the {copyright_holder} nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "controller_interface/controller_interface.hpp"
-#include "std_srvs/srv/trigger.hpp"
-
-namespace robotiq_controllers
-{
-class RobotiqActivationController : public controller_interface::ControllerInterface
-{
-public:
- controller_interface::InterfaceConfiguration command_interface_configuration() const override;
-
- controller_interface::InterfaceConfiguration state_interface_configuration() const override;
-
- controller_interface::return_type update(
- const rclcpp::Time & time, const rclcpp::Duration & period) override;
-
- CallbackReturn on_activate(const rclcpp_lifecycle::State & previous_state) override;
-
- CallbackReturn on_deactivate(const rclcpp_lifecycle::State & previous_state) override;
-
- CallbackReturn on_init() override;
-
-private:
- bool reactivateGripper(
- std_srvs::srv::Trigger::Request::SharedPtr req,
- std_srvs::srv::Trigger::Response::SharedPtr resp);
-
- static constexpr double ASYNC_WAITING = 2.0;
- enum CommandInterfaces
- {
- REACTIVATE_GRIPPER_CMD,
- REACTIVATE_GRIPPER_RESPONSE
- };
-
-  rclcpp::Service<std_srvs::srv::Trigger>::SharedPtr reactivate_gripper_srv_;
-};
-} // namespace robotiq_controllers
diff --git a/mirage/mirage/ros_ws/src/robotiq_controllers/package.xml b/mirage/mirage/ros_ws/src/robotiq_controllers/package.xml
deleted file mode 100644
index 5ee3a47..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_controllers/package.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<?xml version="1.0"?>
-<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
-<package format="3">
-  <name>robotiq_controllers</name>
-  <version>0.0.1</version>
-  <description>Controllers for the Robotiq gripper.</description>
-  <maintainer>Alex Moriarty</maintainer>
-  <maintainer>Marq Rasumessen</maintainer>
-  <license>BSD-3-Clause</license>
-  <author>Cory Crean</author>
-
-  <buildtool_depend>ament_cmake</buildtool_depend>
-
-  <depend>controller_interface</depend>
-  <depend>std_srvs</depend>
-
-  <test_depend>ament_lint_auto</test_depend>
-  <test_depend>ament_lint_common</test_depend>
-
-  <export>
-    <build_type>ament_cmake</build_type>
-  </export>
-</package>
diff --git a/mirage/mirage/ros_ws/src/robotiq_controllers/src/robotiq_activation_controller.cpp b/mirage/mirage/ros_ws/src/robotiq_controllers/src/robotiq_activation_controller.cpp
deleted file mode 100644
index 03c4fa8..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_controllers/src/robotiq_activation_controller.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2022 PickNik, Inc.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// * Neither the name of the {copyright_holder} nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-
-#include "robotiq_controllers/robotiq_activation_controller.hpp"
-
-namespace robotiq_controllers
-{
-controller_interface::InterfaceConfiguration
-RobotiqActivationController::command_interface_configuration() const
-{
- controller_interface::InterfaceConfiguration config;
- config.type = controller_interface::interface_configuration_type::INDIVIDUAL;
-
- config.names.emplace_back("reactivate_gripper/reactivate_gripper_cmd");
- config.names.emplace_back("reactivate_gripper/reactivate_gripper_response");
-
- return config;
-}
-
-controller_interface::InterfaceConfiguration
-RobotiqActivationController::state_interface_configuration() const
-{
- controller_interface::InterfaceConfiguration config;
- config.type = controller_interface::interface_configuration_type::INDIVIDUAL;
-
- return config;
-}
-
-controller_interface::return_type RobotiqActivationController::update(
- const rclcpp::Time & /*time*/, const rclcpp::Duration & /*period*/)
-{
- return controller_interface::return_type::OK;
-}
-
-rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn
-RobotiqActivationController::on_activate(const rclcpp_lifecycle::State & /*previous_state*/)
-{
- // Check command interfaces.
- if (command_interfaces_.size() != 2) {
- RCLCPP_ERROR(
- get_node()->get_logger(), "Expected %d command interfaces, but got %zu.", 2,
- command_interfaces_.size());
- return rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn::ERROR;
- }
-
- try {
- // Create service for re-activating the gripper.
-    reactivate_gripper_srv_ = get_node()->create_service<std_srvs::srv::Trigger>(
- "~/reactivate_gripper",
- [this](
- std_srvs::srv::Trigger::Request::SharedPtr req,
- std_srvs::srv::Trigger::Response::SharedPtr resp) { this->reactivateGripper(req, resp); });
- } catch (...) {
- return LifecycleNodeInterface::CallbackReturn::ERROR;
- }
- return LifecycleNodeInterface::CallbackReturn::SUCCESS;
-}
-
-rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn
-RobotiqActivationController::on_deactivate(const rclcpp_lifecycle::State & /*previous_state*/)
-{
- try {
- reactivate_gripper_srv_.reset();
- } catch (...) {
- return LifecycleNodeInterface::CallbackReturn::ERROR;
- }
-
- return LifecycleNodeInterface::CallbackReturn::SUCCESS;
-}
-
-rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn
-RobotiqActivationController::on_init()
-{
- return LifecycleNodeInterface::CallbackReturn::SUCCESS;
-}
-
-bool RobotiqActivationController::reactivateGripper(
- std_srvs::srv::Trigger::Request::SharedPtr /*req*/,
- std_srvs::srv::Trigger::Response::SharedPtr resp)
-{
- command_interfaces_[REACTIVATE_GRIPPER_RESPONSE].set_value(ASYNC_WAITING);
- command_interfaces_[REACTIVATE_GRIPPER_CMD].set_value(1.0);
-
- while (command_interfaces_[REACTIVATE_GRIPPER_RESPONSE].get_value() == ASYNC_WAITING) {
- std::this_thread::sleep_for(std::chrono::milliseconds(50));
- }
- resp->success = command_interfaces_[REACTIVATE_GRIPPER_RESPONSE].get_value();
-
- return resp->success;
-}
-} // namespace robotiq_controllers
-
-#include "pluginlib/class_list_macros.hpp"
-
-PLUGINLIB_EXPORT_CLASS(
- robotiq_controllers::RobotiqActivationController, controller_interface::ControllerInterface)
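
The deleted controller above exposed a `~/reactivate_gripper` Trigger service. As a hedged sketch (not part of the diff), this is how such a service could be called from Python; the service path assumes the controller was spawned under the hypothetical name `robotiq_activation_controller`:

```
# Sketch only: calling the reactivate_gripper Trigger service from rclpy.
# "/robotiq_activation_controller" is an assumed controller name.
import rclpy
from rclpy.node import Node
from std_srvs.srv import Trigger

def reactivate_gripper():
    rclpy.init()
    node = Node('reactivate_gripper_client')
    client = node.create_client(Trigger, '/robotiq_activation_controller/reactivate_gripper')
    if not client.wait_for_service(timeout_sec=5.0):
        raise RuntimeError('reactivate_gripper service not available')
    future = client.call_async(Trigger.Request())
    rclpy.spin_until_future_complete(node, future)
    success = future.result().success
    node.destroy_node()
    rclpy.shutdown()
    return success
```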
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/CHANGELOG.rst b/mirage/mirage/ros_ws/src/robotiq_description/CHANGELOG.rst
deleted file mode 100644
index 8ece7f4..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/CHANGELOG.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package robotiq_description
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.0.1 (2023-07-17)
-------------------
-* Initial ROS 2 release of robotiq_description
- * includes support for Robotiq 2F 85
- * This package is not supported by Robotiq but is being maintained by PickNik Robotics
-* Contributors: Alex Moriarty, Anthony Baker, Chance Cardona, Cory Crean, Erik Holum, Marq Rasmussen, Sakai Hibiki, Sebastian Castro, marqrazz
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/launch/view_gripper.launch.py b/mirage/mirage/ros_ws/src/robotiq_description/launch/view_gripper.launch.py
deleted file mode 100644
index 776ddd9..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/launch/view_gripper.launch.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright (c) 2022 PickNik, Inc.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# * Neither the name of the {copyright_holder} nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-import launch
-from launch.substitutions import (
- Command,
- FindExecutable,
- LaunchConfiguration,
- PathJoinSubstitution,
-)
-import launch_ros
-import os
-
-
-def generate_launch_description():
- pkg_share = launch_ros.substitutions.FindPackageShare(
- package="robotiq_description"
- ).find("robotiq_description")
- default_model_path = os.path.join(
- pkg_share, "urdf", "robotiq_2f_85_gripper.urdf.xacro"
- )
- default_rviz_config_path = os.path.join(pkg_share, "rviz", "view_urdf.rviz")
-
- args = []
- args.append(
- launch.actions.DeclareLaunchArgument(
- name="model",
- default_value=default_model_path,
- description="Absolute path to gripper URDF file",
- )
- )
- args.append(
- launch.actions.DeclareLaunchArgument(
- name="rvizconfig",
- default_value=default_rviz_config_path,
- description="Absolute path to rviz config file",
- )
- )
-
- robot_description_content = Command(
- [
- PathJoinSubstitution([FindExecutable(name="xacro")]),
- " ",
- LaunchConfiguration("model"),
- ]
- )
- robot_description_param = {
- "robot_description": launch_ros.parameter_descriptions.ParameterValue(
- robot_description_content, value_type=str
- )
- }
-
- robot_state_publisher_node = launch_ros.actions.Node(
- package="robot_state_publisher",
- executable="robot_state_publisher",
- parameters=[robot_description_param],
- )
-
- joint_state_publisher_node = launch_ros.actions.Node(
- package="joint_state_publisher_gui",
- executable="joint_state_publisher_gui",
- )
-
- rviz_node = launch_ros.actions.Node(
- package="rviz2",
- executable="rviz2",
- name="rviz2",
- output="screen",
- arguments=["-d", LaunchConfiguration("rvizconfig")],
- )
-
- nodes = [
- robot_state_publisher_node,
- joint_state_publisher_node,
- rviz_node,
- ]
-
- return launch.LaunchDescription(args + nodes)
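
For reference, the deleted launch file built `robot_description` by shelling out to `xacro` through a `Command` substitution. A minimal sketch of the equivalent expansion using the `xacro` Python module (the model path is whatever URDF xacro you point it at):

```
# Sketch: expand a gripper xacro to a URDF string, mirroring what the
# launch file's Command substitution did. Assumes the `xacro` package
# (shipped with ROS 2) is importable.
import xacro

def load_robot_description(model_path):
    # model_path, e.g. ".../robotiq_description/urdf/robotiq_2f_85_gripper.urdf.xacro"
    doc = xacro.process_file(model_path)
    return doc.toxml()
```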
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/package.xml b/mirage/mirage/ros_ws/src/robotiq_description/package.xml
deleted file mode 100644
index 18611bc..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/package.xml
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0"?>
-<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
-<package format="3">
-  <name>robotiq_description</name>
-  <version>0.0.1</version>
-  <description>URDF and xacro description package for the Robotiq gripper.</description>
-  <maintainer>Cory Crean</maintainer>
-  <maintainer>Alex Moriarty</maintainer>
-  <maintainer>Marq Rasumessen</maintainer>
-  <license>BSD</license>
-
-  <buildtool_depend>ament_cmake</buildtool_depend>
-
-  <exec_depend>joint_state_publisher_gui</exec_depend>
-  <exec_depend>launch</exec_depend>
-  <exec_depend>launch_ros</exec_depend>
-  <exec_depend>robot_state_publisher</exec_depend>
-  <exec_depend>rviz2</exec_depend>
-  <exec_depend>urdf</exec_depend>
-  <exec_depend>xacro</exec_depend>
-
-  <test_depend>ament_lint_auto</test_depend>
-  <test_depend>ament_lint_common</test_depend>
-
-  <export>
-    <build_type>ament_cmake</build_type>
-  </export>
-</package>
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/rviz/view_urdf.rviz b/mirage/mirage/ros_ws/src/robotiq_description/rviz/view_urdf.rviz
deleted file mode 100644
index c23ac44..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/rviz/view_urdf.rviz
+++ /dev/null
@@ -1,234 +0,0 @@
-Panels:
- - Class: rviz_common/Displays
- Help Height: 78
- Name: Displays
- Property Tree Widget:
- Expanded:
- - /Status1
- Splitter Ratio: 0.6264705657958984
- Tree Height: 555
- - Class: rviz_common/Selection
- Name: Selection
- - Class: rviz_common/Tool Properties
- Expanded:
- - /2D Goal Pose1
- - /Publish Point1
- Name: Tool Properties
- Splitter Ratio: 0.5886790156364441
- - Class: rviz_common/Views
- Expanded:
- - /Current View1
- Name: Views
- Splitter Ratio: 0.5
- - Class: rviz_common/Time
- Experimental: false
- Name: Time
- SyncMode: 0
- SyncSource: ""
-Visualization Manager:
- Class: ""
- Displays:
- - Alpha: 0.5
- Cell Size: 1
- Class: rviz_default_plugins/Grid
- Color: 160; 160; 164
- Enabled: true
- Line Style:
- Line Width: 0.029999999329447746
- Value: Lines
- Name: Grid
- Normal Cell Count: 0
- Offset:
- X: 0
- Y: 0
- Z: 0
- Plane: XY
- Plane Cell Count: 10
- Reference Frame:
- Value: true
- - Alpha: 1
- Class: rviz_default_plugins/RobotModel
- Collision Enabled: false
- Description File: ""
- Description Source: Topic
- Description Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: robot_description
- Enabled: true
- Links:
- All Links Enabled: true
- Expand Joint Details: false
- Expand Link Details: false
- Expand Tree: false
- Link Tree Style: Links in Alphabetic Order
- arm_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- base_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- dummy_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- end_effector_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- forearm_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- gripper_base_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- left_finger_dist_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- left_finger_prox_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- lower_wrist_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- right_finger_dist_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- right_finger_prox_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- shoulder_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- table:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- tool_frame:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- upper_wrist_link:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Value: true
- world:
- Alpha: 1
- Show Axes: false
- Show Trail: false
- Mass Properties:
- Inertia: false
- Mass: false
- Name: RobotModel
- TF Prefix: ""
- Update Interval: 0
- Value: true
- Visual Enabled: true
- Enabled: true
- Global Options:
- Background Color: 48; 48; 48
- Fixed Frame: world
- Frame Rate: 30
- Name: root
- Tools:
- - Class: rviz_default_plugins/Interact
- Hide Inactive Objects: true
- - Class: rviz_default_plugins/MoveCamera
- - Class: rviz_default_plugins/Select
- - Class: rviz_default_plugins/FocusCamera
- - Class: rviz_default_plugins/Measure
- Line color: 128; 128; 0
- - Class: rviz_default_plugins/SetInitialPose
- Covariance x: 0.25
- Covariance y: 0.25
- Covariance yaw: 0.06853891909122467
- Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: /initialpose
- - Class: rviz_default_plugins/SetGoal
- Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: /goal_pose
- - Class: rviz_default_plugins/PublishPoint
- Single click: true
- Topic:
- Depth: 5
- Durability Policy: Volatile
- History Policy: Keep Last
- Reliability Policy: Reliable
- Value: /clicked_point
- Transformation:
- Current:
- Class: rviz_default_plugins/TF
- Value: true
- Views:
- Current:
- Class: rviz_default_plugins/Orbit
- Distance: 2.1567115783691406
- Enable Stereo Rendering:
- Stereo Eye Separation: 0.05999999865889549
- Stereo Focal Distance: 1
- Swap Stereo Eyes: false
- Value: false
- Focal Point:
- X: -0.09681572020053864
- Y: -0.10843408107757568
- Z: 0.1451336145401001
- Focal Shape Fixed Size: true
- Focal Shape Size: 0.05000000074505806
- Invert Z Axis: false
- Name: Current View
- Near Clip Distance: 0.009999999776482582
- Pitch: 0.785398006439209
- Target Frame:
- Value: Orbit (rviz)
- Yaw: 0.785398006439209
- Saved: ~
-Window Geometry:
- Displays:
- collapsed: false
- Height: 846
- Hide Left Dock: false
- Hide Right Dock: false
- QMainWindow State: 000000ff00000000fd000000040000000000000156000002b4fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000002b4000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002b4fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003b000002b4000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004b00000003efc0100000002fb0000000800540069006d00650100000000000004b00000024400fffffffb0000000800540069006d006501000000000000045000000000000000000000023f000002b400000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
- Selection:
- collapsed: false
- Time:
- collapsed: false
- Tool Properties:
- collapsed: false
- Views:
- collapsed: false
- Width: 1200
- X: 1989
- Y: 261
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_2f_85_gripper.urdf.xacro b/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_2f_85_gripper.urdf.xacro
deleted file mode 100644
index cbd9522..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_2f_85_gripper.urdf.xacro
+++ /dev/null
@@ -1,13 +0,0 @@
-<!-- [13 lines of xacro markup lost in extraction: the top-level robotiq_2f_85_gripper wrapper; contents not recoverable] -->
\ No newline at end of file
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_2f_85_macro.urdf.xacro b/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_2f_85_macro.urdf.xacro
deleted file mode 100644
index cbac6eb..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_2f_85_macro.urdf.xacro
+++ /dev/null
@@ -1,293 +0,0 @@
-<!-- [~293 lines of xacro markup lost in extraction. Only scattered Gazebo
-     contact/friction parameter values survived (two blocks, each with:
-     100000.0, 100000.0, 1e+5, 1, 0, 0.2, 0.002, 0), apparently for the two
-     finger-tip links; the rest of the macro is not recoverable.] -->
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_gripper.ros2_control.xacro b/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_gripper.ros2_control.xacro
deleted file mode 100644
index b8d3e29..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/urdf/robotiq_gripper.ros2_control.xacro
+++ /dev/null
@@ -1,108 +0,0 @@
-<!-- [~108 lines of xacro markup lost in extraction. Recoverable fragments:
-     the hardware plugin gazebo_ros2_control/GazeboSystem, and five mimic-joint
-     entries that track ${prefix}robotiq_85_left_knuckle_joint with multipliers
-     -1, 1, -1, -1, 1; the rest of the ros2_control description is not
-     recoverable.] -->
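
The only structure recoverable from the file above is the mimic relationship. As an illustrative sketch (joint names are hypothetical; only the multipliers survived extraction), the five passive finger joints follow the commanded knuckle angle as `q_i = multiplier_i * q_knuckle`:

```
# Illustrative only: mimic-joint relationship recoverable from the deleted
# ros2_control xacro. Joint names below are hypothetical placeholders.
MIMIC_MULTIPLIERS = {
    'right_knuckle': -1.0,
    'left_inner_knuckle': 1.0,
    'right_inner_knuckle': -1.0,
    'left_finger_tip': -1.0,   # hypothetical name
    'right_finger_tip': 1.0,   # hypothetical name
}

def mimic_positions(q_knuckle):
    """Map the commanded left knuckle angle to all passive joint angles."""
    return {name: m * q_knuckle for name, m in MIMIC_MULTIPLIERS.items()}
```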
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/urdf/test_gripper.urdf.xacro b/mirage/mirage/ros_ws/src/robotiq_description/urdf/test_gripper.urdf.xacro
deleted file mode 100644
index 56ab163..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/urdf/test_gripper.urdf.xacro
+++ /dev/null
@@ -1,321 +0,0 @@
-<!-- [~321 lines of xacro markup lost in extraction. Recoverable fragments:
-     the hardware plugin gazebo_ros2_control/GazeboSystem; joint limit
-     expressions {0}..{0.8}, {-pi}..{pi}, and several {-2*pi}..{2*pi} with
-     initial values 0; and the same Gazebo contact/friction values as the
-     gripper macro (100000.0, 1e+5, 1, 0, 0.2, 0.002). The rest of this test
-     fixture is not recoverable.] -->
diff --git a/mirage/mirage/ros_ws/src/robotiq_description/urdf/ur_to_robotiq_adapter.urdf.xacro b/mirage/mirage/ros_ws/src/robotiq_description/urdf/ur_to_robotiq_adapter.urdf.xacro
deleted file mode 100644
index 188fe7d..0000000
--- a/mirage/mirage/ros_ws/src/robotiq_description/urdf/ur_to_robotiq_adapter.urdf.xacro
+++ /dev/null
@@ -1,41 +0,0 @@
-<!-- [41 lines of xacro markup lost in extraction; the UR-to-Robotiq adapter description is not recoverable] -->
diff --git a/mirage/mirage/ros_ws/src/ros_gazebo_setup.bash b/mirage/mirage/ros_ws/src/ros_gazebo_setup.bash
new file mode 100644
index 0000000..0613397
--- /dev/null
+++ b/mirage/mirage/ros_ws/src/ros_gazebo_setup.bash
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Step 1: Install ROS2 Humble Full: https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html
+
+# Step 1.1 Set Locale
+sudo apt update -y
+sudo apt install -y locales
+sudo locale-gen en_US en_US.UTF-8
+sudo update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
+export LANG=en_US.UTF-8
+
+# Step 1.2 Setup Sources
+sudo apt install -y software-properties-common
+echo -ne '\n' | sudo add-apt-repository universe
+sudo apt update && sudo apt install curl -y
+sudo curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg
+echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" | sudo tee /etc/apt/sources.list.d/ros2.list > /dev/null
+
+# Step 1.3 Install ROS2 Packages
+sudo apt update -y
+sudo apt upgrade -y
+sudo apt install ros-humble-desktop-full -y
+echo "source /opt/ros/humble/setup.bash" >> ~/.bashrc
+
+# Step 2: Setup ROS2 Control: https://control.ros.org/humble/doc/ros2_control_demos/doc/index.html
+mkdir -p ~/ros2_ws/src
+cd ~/ros2_ws/src
+git clone https://github.com/ros-controls/ros2_control_demos -b humble
+cd ~/ros2_ws/
+sudo apt-get update
+# Source ROS first so ${ROS_DISTRO} is set for rosdep below.
+. /opt/ros/humble/setup.sh
+sudo rosdep init 2>/dev/null || true  # no-op if rosdep is already initialized
+rosdep update
+sudo rosdep install --from-paths ./ -i -y --rosdistro ${ROS_DISTRO}
+colcon build --merge-install
+
+# Step 3: Tracikpy Installation: https://github.com/mjd3/tracikpy
+sudo apt-get install libeigen3-dev liborocos-kdl-dev libkdl-parser-dev liburdfdom-dev libnlopt-dev
+cd ~/
+git clone https://github.com/mjd3/tracikpy.git
+pip install tracikpy/
+
+# Step 4: Make sure Gazebo plugins can be found: https://classic.gazebosim.org/tutorials?tut=guided_i5
+echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/mirage/xembody/xembody/src/ros_ws/build/gazebo_env" >> ~/.bashrc
+
+# Step 5: Compile workspace
+cd ~/mirage/mirage/mirage/ros_ws  # assumes the repo was cloned to ~/mirage
+colcon build
+. install/setup.bash
+source ~/.bashrc
\ No newline at end of file
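
A quick smoke test for the tracikpy install from Step 3 might look like the following; the URDF path and link names are placeholders to substitute with a robot you have (tracikpy ships an example URDF in its repo):

```
# Hedged smoke test for the Step 3 tracikpy install. 'my_robot.urdf',
# 'base_link', and 'tool_frame' are placeholder names, not files from this repo.
import numpy as np
from tracikpy import TracIKSolver

solver = TracIKSolver('my_robot.urdf', 'base_link', 'tool_frame')
ee_pose = np.eye(4)
ee_pose[:3, 3] = [0.3, 0.0, 0.4]  # some reachable target, robot-dependent
qout = solver.ik(ee_pose, qinit=np.zeros(solver.number_of_joints))
print('IK solution:', qout)       # None if no solution was found
```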
diff --git a/mirage/mirage/ros_ws/src/xembody_publisher/package.xml b/mirage/mirage/ros_ws/src/xembody_publisher/package.xml
deleted file mode 100644
index 06e1879..0000000
--- a/mirage/mirage/ros_ws/src/xembody_publisher/package.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0"?>
-<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
-<package format="3">
-  <name>xembody_publisher</name>
-  <version>0.0.0</version>
-  <description>TODO: Package description</description>
-  <maintainer email="chenyunliang12345@gmail.com">lawrence</maintainer>
-  <license>TODO: License declaration</license>
-
-  <export>
-    <build_type>ament_python</build_type>
-  </export>
-</package>
diff --git a/mirage/mirage/ros_ws/src/xembody_publisher/setup.cfg b/mirage/mirage/ros_ws/src/xembody_publisher/setup.cfg
deleted file mode 100644
index bbd0a9d..0000000
--- a/mirage/mirage/ros_ws/src/xembody_publisher/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/xembody_publisher
-[install]
-install_scripts=$base/lib/xembody_publisher
diff --git a/mirage/mirage/ros_ws/src/xembody_publisher/setup.py b/mirage/mirage/ros_ws/src/xembody_publisher/setup.py
deleted file mode 100644
index dda2ca4..0000000
--- a/mirage/mirage/ros_ws/src/xembody_publisher/setup.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from setuptools import setup
-
-package_name = 'xembody_publisher'
-
-setup(
- name=package_name,
- version='0.0.0',
- packages=[package_name],
- data_files=[
- ('share/' + package_name, ['package.xml']),
- ],
- install_requires=['setuptools'],
- zip_safe=True,
- maintainer='lawrence',
- maintainer_email='chenyunliang12345@gmail.com',
- description='TODO: Package description',
- license='TODO: License declaration',
- tests_require=['pytest'],
- entry_points={
- 'console_scripts': [
- 'xembody_publisher = xembody_publisher.xembody_publisher:main'
- ],
- },
-)
diff --git a/mirage/mirage/ros_ws/src/xembody_publisher/xembody_publisher/__init__.py b/mirage/mirage/ros_ws/src/xembody_publisher/xembody_publisher/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/mirage/mirage/ros_ws/src/xembody_publisher/xembody_publisher/xembody_publisher.py b/mirage/mirage/ros_ws/src/xembody_publisher/xembody_publisher/xembody_publisher.py
deleted file mode 100644
index 057a6df..0000000
--- a/mirage/mirage/ros_ws/src/xembody_publisher/xembody_publisher/xembody_publisher.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from mirage.infra.ros_inpaint_publisher_sim import ROSInpaintPublisherSim
-import numpy as np
-import threading
-import os
-import time
-import rclpy
-import pickle
-
-def start_publisher(data_file_path):
- ros_inpaint_publisher = ROSInpaintPublisherSim()
- spin_thread = threading.Thread(target=rclpy.spin, args=(ros_inpaint_publisher.node,))
- spin_thread.start()
-
- data_result_file_path = os.path.join(data_file_path, 'return_data.pkl')  # compute before data_file_path is reassigned
- data_file_path = os.path.join(data_file_path, 'data.pkl')
- while True:
- if os.path.exists(data_file_path):
- with open(data_file_path, 'rb') as f:
- data = pickle.load(f)
- rgb_image = data['rgb_image']
- point_cloud = data['point_cloud']
- segmentation_mask = data['segmentation_mask']
- joint_angles = data['joint_angles']
- ros_inpaint_publisher.publish_to_ros_node(rgb_image, point_cloud, segmentation_mask, joint_angles)
- os.remove(data_file_path)
-
- # Now, wait for the result and write it to a data file
- inpainted_image = ros_inpaint_publisher.get_inpainted_image(True)
- np.save(data_result_file_path, {'inpainted_image': inpainted_image})
-
- time.sleep(0.01)
-
- spin_thread.join()
-
-def main():
- data_file_path = os.path.join('/home/lawrence/xembody/xembody/xembody/src/orbit/data_exchange_dir')
- start_publisher(data_file_path)
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
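
The deleted script above implemented one side of a simple file-exchange protocol. As a hypothetical sketch of the other side (names mirror the deleted code; nothing here is part of the diff), a client would drop a pickle at `data.pkl` and wait for the result:

```
# Hypothetical client for the file-exchange protocol used by the deleted
# publisher: write data.pkl into the exchange directory, then wait for the
# publisher to consume it and write the inpainted result back.
import os
import pickle
import time

def request_inpainting(exchange_dir, rgb_image, point_cloud, segmentation_mask, joint_angles):
    request_path = os.path.join(exchange_dir, 'data.pkl')
    result_path = os.path.join(exchange_dir, 'return_data.pkl')
    with open(request_path, 'wb') as f:
        pickle.dump({
            'rgb_image': rgb_image,
            'point_cloud': point_cloud,
            'segmentation_mask': segmentation_mask,
            'joint_angles': joint_angles,
        }, f)
    # The publisher deletes data.pkl once read, then saves the result with
    # np.save (which may append ".npy" to the file name).
    while not (os.path.exists(result_path) or os.path.exists(result_path + '.npy')):
        time.sleep(0.01)
    return result_path if os.path.exists(result_path) else result_path + '.npy'
```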
diff --git a/robosuite b/robosuite
index 93e2881..9bc1610 160000
--- a/robosuite
+++ b/robosuite
@@ -1 +1 @@
-Subproject commit 93e288194c857e036bb682a1ecc1529ae798ceaa
+Subproject commit 9bc1610fbd93e7c4b25e916dfdab7fb28f8a4233