diff --git a/main.py b/main.py
index 05efa29..4b79ca4 100644
--- a/main.py
+++ b/main.py
@@ -15,10 +15,8 @@ def main():
 
 
     pbutils = PyBUtils(renders=True)
-
-    robot = Robot(
-        pbclient=pbutils.pbclient, position=[0, 1, 0], orientation=[0, 0, 0, 1]
-    )
+
+    robot = Robot(pbclient=pbutils.pbclient, position=[0, 1, 0], orientation=[0, 0, 0, 1])
 
     penv = PruningEnv(
         pbutils=pbutils,
@@ -26,10 +24,9 @@ def main():
     )
 
     _1_inch = 0.0254
-    penv.activate_shape(shape="cylinder", radius=_1_inch * 2 , height=2.85, orientation=[0, np.pi / 2, 0])
+    penv.activate_shape(shape="cylinder", radius=_1_inch * 2, height=2.85, orientation=[0, np.pi / 2, 0])
     # penv.activate_shape(shape="cylinder", radius=0.01, height=2.85, orientation=[0, np.pi / 2, 0])
-
-
+
     # penv.load_tree(
     #     pbutils=pbutils,
     #     scale=1.0,
@@ -48,7 +45,7 @@ def main():
         time.sleep(0.1)
 
     # log.debug(robot.sensors['tof0'].tf_frame)
-    
+
     while True:
         try:
             # log.debug(f"{robot.sensors['tof0']}")
@@ -62,7 +59,7 @@ def main():
             )
             # tof0_view_matrix = np.asarray(tof0_view_matrix).reshape((4, 4), order="F")
             # log.debug(f"{tof0_view_matrix[:3, 3]}")
-            
+
             keys_pressed = penv.get_key_pressed()
             action = penv.get_key_action(robot=robot, keys_pressed=keys_pressed)
             action = action.reshape((6, 1))
diff --git a/pybullet_tree_sim/camera.py b/pybullet_tree_sim/camera.py
index ecc6a3e..5c0d067 100644
--- a/pybullet_tree_sim/camera.py
+++ b/pybullet_tree_sim/camera.py
@@ -55,6 +55,7 @@ def __init__(self, pbclient, sensor_name: str, sensor_type: str = "camera") -> N
 
 def main():
     import pprint as pp
+
     pbutils = PyBUtils(renders=False)
     camera = Camera(pbutils, sensor_name="realsense_d435i")
     pp.pprint(camera.depth_pixel_coords)
diff --git a/pybullet_tree_sim/config/description/tof/vl6180.yaml b/pybullet_tree_sim/config/description/tof/vl6180.yaml
index b0d0d1e..42f1af4 100644
--- a/pybullet_tree_sim/config/description/tof/vl6180.yaml
+++ b/pybullet_tree_sim/config/description/tof/vl6180.yaml
@@ -3,6 +3,7 @@ data_type: "d"
 depth:
   dfov: 25.0
   near_plane: 0.002
-  far_plane: 0.150
+  # far_plane: 0.150
+  far_plane: 0.40
   width: 1
   height: 1
diff --git a/pybullet_tree_sim/config/runtime/mock_pruner.yaml b/pybullet_tree_sim/config/runtime/mock_pruner.yaml
index aa44265..edfca8d 100644
--- a/pybullet_tree_sim/config/runtime/mock_pruner.yaml
+++ b/pybullet_tree_sim/config/runtime/mock_pruner.yaml
@@ -8,14 +8,14 @@ sensors:
     tilt: 0.0
   tof0:
     type: tof
-    name: vl53l8cx
+    name: vl6180
     # x_offset: "0.02"
     tf_frame: "tof0"
     pan: 0.0
     tilt: 0.0
   tof1:
     type: tof
-    name: vl53l8cx
+    name: vl6180
     # x_offset: "-0.02"
     tf_frame: "tof1"
     pan: 0.0
diff --git a/pybullet_tree_sim/plot.py b/pybullet_tree_sim/plot.py
index 37c15dc..557f547 100644
--- a/pybullet_tree_sim/plot.py
+++ b/pybullet_tree_sim/plot.py
@@ -3,22 +3,25 @@
 import modern_robotics as mr
 from zenlog import log
 import numpy as np
+import pprint as pp
+
 
 
 def debug_sensor_world_data(data):
+    pp.pprint(data)
     fig = go.Figure()
     for tof_name, tof_data in data.items():
         # log.warn(tof_data)
         fig.add_trace(
             go.Scatter3d(
-                x=tof_data['data'][:, 0],
-                y=tof_data['data'][:, 1],
-                z=tof_data['data'][:, 2],
+                x=tof_data["data"][:, 0],
+                y=tof_data["data"][:, 1],
+                z=tof_data["data"][:, 2],
                 mode="markers",
                 marker=dict(size=2),
                 name=tof_name,
             )
         )
-        inv_view_matrix = mr.TransInv(tof_data['view_matrix'])
+        inv_view_matrix = mr.TransInv(tof_data["view_matrix"])
         fig.add_trace(
             go.Scatter3d(
                 x=[inv_view_matrix[0, 3]],
@@ -29,7 +32,7 @@ def debug_sensor_world_data(data):
                 marker=dict(size=5),
             )
         )
-    
+
     fig.update_layout(
         title=f"World Data",
         scene=dict(
@@ -46,17 +49,12 @@ def debug_sensor_world_data(data):
         )
     fig.show()
     return
-
 
 
 def debug_deproject_pixels_to_points(sensor, data, cam_coords, world_coords, view_matrix):
     hovertemplate = "id: %{id}<br>x: %{x}<br>y: %{y}<br>z: %{z}"
-    # _data = data.reshape([sensor.depth_width, sensor.depth_height], order="F")
-    # _data = _data.reshape((sensor.depth_width * sensor.depth_height, 1), order="C")
-    print(np.array(list(range(8)) * 8).reshape((8, 8), order="C").flatten(order="F"))
-    np.array(list(range(8)) * 8)
     fig = go.Figure(
         data=[
             go.Scatter3d(
@@ -65,9 +63,6 @@ def debug_deproject_pixels_to_points(sensor, data, cam_coords, world_coords, vie
                 # y=np.array([list(range(8))] * 8).T.flatten(order="F"),
                 z=data.flatten(),
                 mode="markers",
-                # ids=np.array([f"{i}" for i in range(sensor.depth_width * sensor.depth_height)])
-                # .reshape((8, 8), order="C")
-                # .flatten(order="F"),
                 ids=[f"{i}" for i in range(sensor.depth_width * sensor.depth_height)],
                 hovertemplate=hovertemplate,
             )
@@ -90,7 +85,7 @@ def debug_deproject_pixels_to_points(sensor, data, cam_coords, world_coords, vie
             go.Scatter3d(
                 x=sensor.depth_film_coords[:, 0],
                 y=sensor.depth_film_coords[:, 1],
-                z= data.flatten(order="F"),
+                z=data.flatten(order="F"),
                 mode="markers",
                 ids=[f"{i}" for i in range(sensor.depth_width * sensor.depth_height)],
                 hovertemplate=hovertemplate,
diff --git a/pybullet_tree_sim/pruning_environment.py b/pybullet_tree_sim/pruning_environment.py
index 833d92f..b6f6839 100644
--- a/pybullet_tree_sim/pruning_environment.py
+++ b/pybullet_tree_sim/pruning_environment.py
@@ -346,8 +346,8 @@ def deproject_pixels_to_points(
         # https://stackoverflow.com/questions/4124041/is-opengl-coordinate-system-left-handed-or-right-handed
         # https://github.com/bitlw/LearnProjMatrix/blob/main/doc/OpenGL_Projection.md#introduction
         # view_matrix[1:3, :] = -view_matrix[1:3, :]
-        # 
-        
+        #
+
         proj_matrix = np.asarray(camera.depth_proj_mat).reshape([4, 4], order="F")
         # log.warning(f'{proj_matrix}')
         # proj_matrix = camera.depth_proj_mat
@@ -358,14 +358,14 @@ def deproject_pixels_to_points(
         # Get camera intrinsics from projection matrix. If square camera, these should be the same.
         fx = proj_matrix[0, 0]
         fy = proj_matrix[1, 1]  # if square camera, these should be the same
-        
+
         # Get camera coordinates from film-plane coordinates. Scale, add z (depth), then homogenize the matrix.
         cam_coords = np.divide(np.multiply(camera.depth_film_coords, data), [fx, fy])
         cam_coords = np.concatenate((cam_coords, data, np.ones((camera.depth_width * camera.depth_height, 1))), axis=1)
 
         if return_frame.strip().lower() == "camera":
             return cam_coords
-        
+
         log.warning(f"view_matrix:\n{view_matrix}")
         world_coords = (mr.TransInv(view_matrix) @ cam_coords.T).T
@@ -514,31 +514,22 @@ def get_key_action(self, robot: Robot, keys_pressed: list) -> np.ndarray:
                 log.warning(f"button p pressed")
                 for name, sensor in robot.sensors.items():
                     # Get view and projection matrices
-                    if name.startswith('tof'):
+                    if name.startswith("tof"):
                         log.error(name)
                         view_matrix = robot.get_view_mat_at_curr_pose(camera=sensor)
                         # log.error(view_matrix)
-                        
+
                         rgb, depth = self.pbutils.get_rgbd_at_cur_pose(
                             camera=sensor, type="robot", view_matrix=view_matrix
                         )
-                        view_matrix = np.asarray(view_matrix).reshape([4, 4], order="F") 
+                        view_matrix = np.asarray(view_matrix).reshape([4, 4], order="F")
                         depth = depth.reshape((sensor.depth_width * sensor.depth_height, 1), order="F")
-                        
+
                         world_points = self.deproject_pixels_to_points(
-                            camera=sensor,
-                            data=depth,
-                            view_matrix=view_matrix,
-                            return_frame='world',
-                            debug=False
+                            camera=sensor, data=depth, view_matrix=view_matrix, return_frame="world", debug=False
                         )
-
-                        data.update({name: {
-                            'data': world_points,
-                            'view_matrix': view_matrix,
-                            'sensor': sensor
-                            }
-                        })
+
+                        data.update({name: {"data": world_points, "view_matrix": view_matrix, "sensor": sensor}})
                 self.last_button_push_time = time.time()
                 plot.debug_sensor_world_data(
                     data=data,
@@ -557,7 +548,7 @@ def get_key_action(self, robot: Robot, keys_pressed: list) -> np.ndarray:
                 pass
 
         else:
-            action = np.array([0.0, 0.0, 0, 0.0, 0.0, 0.0])
+            action = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
         keys_pressed = []
         return action
@@ -567,7 +558,7 @@ def main():
     from pybullet_tree_sim.utils.pyb_utils import PyBUtils
     import numpy as np
     import secrets
-    
+
     pbutils = PyBUtils(renders=False)
     penv = PruningEnv(pbutils=pbutils)
     tof0 = TimeOfFlight(pbclient=pbutils.pbclient, sensor_name="vl53l8cx")
@@ -577,27 +568,20 @@
     # data[0,0] = 0.31
     depth_data[:, :] = tuple(generator.uniform(0.31, 0.35, (tof0.depth_width, tof0.depth_height)))
-
     start = 0.31
     stop = 0.35
     # Depth data IRL comes in as a C-format nx1 array. start with this IRL
-    depth_data[:, 3:5] = np.array([np.arange(start, stop, (stop - start) / 8), np.arange(start, stop, (stop - start) / 8)]).T
+    depth_data[:, 3:5] = np.array(
+        [np.arange(start, stop, (stop - start) / 8), np.arange(start, stop, (stop - start) / 8)]
+    ).T
     depth_data[-1, 3] = 0.31
 
     # Switch to F-format
     depth_data = depth_data.reshape((tof0.depth_width * tof0.depth_height, 1), order="F")
-    
+
     view_matrix = np.identity(4)
-    view_matrix[:3, 3] = -1 * np.array([0,0,1])
-
-    world_points = penv.deproject_pixels_to_points(
-        camera=tof0,
-        data=depth_data,
-        view_matrix=view_matrix,
-        debug=True
-    )
-
-
-
+    view_matrix[:3, 3] = -1 * np.array([0, 0, 1])
+
+    world_points = penv.deproject_pixels_to_points(camera=tof0, data=depth_data, view_matrix=view_matrix, debug=True)
 
     # log.warning(f"joint angles: {penv.ur5.get_joint_angles()}")
diff --git a/pybullet_tree_sim/robot.py b/pybullet_tree_sim/robot.py
index 1b2d305..ad5279a 100644
--- a/pybullet_tree_sim/robot.py
+++ b/pybullet_tree_sim/robot.py
@@ -140,16 +140,18 @@ def _get_joints(self) -> dict:
         for i in range(self.num_joints):
             info = self.pbclient.getJointInfo(self.robot, i)
             joint_name = info[1].decode("utf-8")
-            joints.update({
-                joint_name: {
-                    "id": i,
-                    'type': info[2],
-                    'lower_limit': info[8],
-                    'upper_limit': info[9],
-                    'max_force': info[10],
-                    'max_velocity': info[11]
+            joints.update(
+                {
+                    joint_name: {
+                        "id": i,
+                        "type": info[2],
+                        "lower_limit": info[8],
+                        "upper_limit": info[9],
+                        "max_force": info[10],
+                        "max_velocity": info[11],
+                    }
                 }
-            })
+            )
         return joints
@@ -157,9 +159,9 @@ def _assign_control_joints(self, joints: dict) -> list:
         control_joints = []
         control_joint_idxs = []
         for joint, joint_info in joints.items():
-            if joint_info['type'] == 0:
+            if joint_info["type"] == 0:
                 control_joints.append(joint)
-                control_joint_idxs.append(joint_info['id'])
+                control_joint_idxs.append(joint_info["id"])
         return control_joints, control_joint_idxs
@@ -179,14 +181,21 @@ def _assign_collision_links(self) -> list:
             if i == 0:
                 continue
             else:
-                if robot_part+'__base' in self.links.keys() and self.robot_conf['robot_stack'][i-1]+'__tool0' in self.links.keys():
-                    robot_collision_filter_idxs.append((self.links[robot_part+'__base'], self.links[self.robot_conf['robot_stack'][i-1]+'__tool0']))
+                if (
+                    robot_part + "__base" in self.links.keys()
+                    and self.robot_conf["robot_stack"][i - 1] + "__tool0" in self.links.keys()
+                ):
+                    robot_collision_filter_idxs.append(
+                        (
+                            self.links[robot_part + "__base"],
+                            self.links[self.robot_conf["robot_stack"][i - 1] + "__tool0"],
+                        )
+                    )
         return robot_collision_filter_idxs
 
     def _get_tool0_link_idx(self):
-        return self.links[self.robot_conf['robot_stack'][-1] + '__tool0']
-    
+        return self.links[self.robot_conf["robot_stack"][-1] + "__tool0"]
 
     # def _get_sensor_attributes(self) -> dict:
     #     # TODO: refactor to only load required sensors
@@ -215,22 +224,24 @@ def _get_sensors(self) -> dict:
             robot_part_runtime_conf_file = os.path.join(robot_part_runtime_conf_path, f"{robot_part}.yaml")
             robot_part_conf = yutils.load_yaml(robot_part_runtime_conf_file)
             if robot_part_conf is None:
-                log.warn(f'Could not load configuration for {robot_part_runtime_conf_file}')
+                log.warn(f"Could not load configuration for {robot_part_runtime_conf_file}")
                 continue
             try:
-                sensor_conf = robot_part_conf['sensors']
+                sensor_conf = robot_part_conf["sensors"]
             except KeyError:
                 log.warn(f"No sensor configuration found for {robot_part} in {robot_part_runtime_conf_file}")
                 continue
             # Create sensors
             for sensor_name, metadata in sensor_conf.items():
-                if metadata['type'] == 'camera':
-                    sensors.update({sensor_name: Camera(pbclient=self.pbclient, sensor_name=metadata['name'])})
-                elif metadata['type'] == 'tof':
-                    sensors.update({sensor_name: TimeOfFlight(pbclient=self.pbclient, sensor_name=metadata['name'])})
+                if metadata["type"] == "camera":
+                    sensors.update({sensor_name: Camera(pbclient=self.pbclient, sensor_name=metadata["name"])})
+                elif metadata["type"] == "tof":
+                    sensors.update({sensor_name: TimeOfFlight(pbclient=self.pbclient, sensor_name=metadata["name"])})
                 # Assign TF frame and pybullet frame id to sensor
-                sensors[sensor_name].tf_frame = robot_part+'__'+metadata['tf_frame'] # TODO: find a better way to get the prefix. If
-                # from robot_conf, need standard for all robots
+                sensors[sensor_name].tf_frame = (
+                    robot_part + "__" + metadata["tf_frame"]
+                )  # TODO: find a better way to get the prefix. If
+                # from robot_conf, need standard for all robots
                 sensors[sensor_name].tf_id = self.links[sensors[sensor_name].tf_frame]
 
         # for key, value in yamlcontent.items():
@@ -253,13 +264,12 @@ def _setup_robot(self):
             self.pos, self.orientation = self.pbclient.multiplyTransforms(
                 self.pos, self.orientation, delta_pos, delta_orientation
             )
-        self.robot = self.pbclient.loadURDF( # TODO: change to PyB_ID, this isn't a robot
+        self.robot = self.pbclient.loadURDF(  # TODO: change to PyB_ID, this isn't a robot
             self.robot_urdf_path, self.pos, self.orientation, flags=flags, useFixedBase=True
         )
         self.num_joints = self.pbclient.getNumJoints(self.robot)
 
         # get link indices dynamically
-
         # # Setup robot info only once
         # if not self.joints:
         #     self.joints = dict()
@@ -326,8 +336,6 @@ def _setup_robot(self):
         # # import pprint as pp
         # # pp.pprint(self.robot_conf)
-
-
         # # self.init_pos_ee = self.get_current_pose(self.end_effector_index)
         # # self.init_pos_base = self.get_current_pose(self.base_index)
         # # self.init_pos_eebase = self.get_current_pose(self.success_link_index)
@@ -549,9 +557,9 @@ def get_view_mat_at_curr_pose(self, camera: Camera | TimeOfFlight) -> np.ndarray
         # log.debug(f"cam vec, up vec:\n{camera_vector}, {up_vector}")
 
         view_matrix = self.pbclient.computeViewMatrix(
-            cameraEyePosition = camera_tf[:3, 3],
-            cameraTargetPosition = camera_tf[:3, 3] + 0.1 * camera_vector,
-            cameraUpVector = up_vector
+            cameraEyePosition=camera_tf[:3, 3],
+            cameraTargetPosition=camera_tf[:3, 3] + 0.1 * camera_vector,
+            cameraUpVector=up_vector,
         )
         return view_matrix
diff --git a/pybullet_tree_sim/utils/pyb_utils.py b/pybullet_tree_sim/utils/pyb_utils.py
index 7e732de..ba69752 100644
--- a/pybullet_tree_sim/utils/pyb_utils.py
+++ b/pybullet_tree_sim/utils/pyb_utils.py
@@ -154,7 +154,6 @@ def get_image_at_curr_pose(self, camera, type, view_matrix=None) -> List:
             flags=self.pbclient.ER_NO_SEGMENTATION_MASK,
             lightDirection=[1, 1, 1],
         )
-
 
     # def setup_bird_view_visualizer(self):
     #     self.viz_view_matrix = self.pbclient.computeViewMatrixFromYawPitchRoll(
@@ -188,7 +187,7 @@ def get_rgbd_at_cur_pose(self, camera, type, view_matrix) -> Tuple[NDArray, NDAr
         @param camera (Camera): Camera object
         @param type (str): either 'robot' or 'viz'
         @param view_matrix (tuple): 16x1 tuple representing the view matrix
-        
+
         @return (rgb, depth) (tuple): RGB and depth images
         """
         # cur_p = self.ur5.get_current_pose(self.camera_link_index)
diff --git a/test/test_camera.py b/test/test_camera.py
index 1669a05..76806da 100644
--- a/test/test_camera.py
+++ b/test/test_camera.py
@@ -10,23 +10,23 @@
 class TestCamera(unittest.TestCase):
     pbutils = PyBUtils(renders=False)
     camera = Camera(pbutils.pbclient, sensor_name="realsense_d435i")
-    
+
     def test_depth_pixel_coords_range(self):
         """Test that the pixel coordinates are in the range x=[0, depth_width] and y=[0, depth_height]."""
-        self.assertTrue(self.camera.depth_pixel_coords[0,0] == 0)
-        self.assertTrue(self.camera.depth_pixel_coords[0,1] == 0)
-        self.assertTrue(self.camera.depth_pixel_coords[-1,0] == self.camera.depth_width - 1)
-        self.assertTrue(self.camera.depth_pixel_coords[-1,1] == self.camera.depth_height - 1)
+        self.assertTrue(self.camera.depth_pixel_coords[0, 0] == 0)
+        self.assertTrue(self.camera.depth_pixel_coords[0, 1] == 0)
+        self.assertTrue(self.camera.depth_pixel_coords[-1, 0] == self.camera.depth_width - 1)
+        self.assertTrue(self.camera.depth_pixel_coords[-1, 1] == self.camera.depth_height - 1)
         return
-    
+
     def test_depth_film_coords_range(self):
         """Test that the film coordinates are in the range [-1, 1]."""
-        self.assertTrue(self.camera.depth_film_coords[0,0] > -1)
-        self.assertTrue(self.camera.depth_film_coords[0,1] > -1)
-        self.assertTrue(self.camera.depth_film_coords[-1,0] < 1)
-        self.assertTrue(self.camera.depth_film_coords[-1,1] < 1)
+        self.assertTrue(self.camera.depth_film_coords[0, 0] > -1)
+        self.assertTrue(self.camera.depth_film_coords[0, 1] > -1)
+        self.assertTrue(self.camera.depth_film_coords[-1, 0] < 1)
+        self.assertTrue(self.camera.depth_film_coords[-1, 1] < 1)
         return
-    
+
     def test_xy_depth_projection(self):
         """Test whether a depth pixel has be adequately scaled to xy"""
         # depth_width = 8
@@ -34,9 +34,9 @@ def test_xy_depth_projection(self):
         # xy_pixels_order_C = np.array(list(np.ndindex((self.camera.depth_width, self.camera.depth_height))), dtype=int)
         # print(xy_pixels_order_C)
         print(self.camera.depth_pixel_coords)
-        
+
         return
 
 
 if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
+    unittest.main()
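
Reviewer note (not part of the patch): the `deproject_pixels_to_points` hunks above are formatting-only; the math is still the film-plane -> camera -> world chain. Below is a minimal, self-contained sketch of that chain with made-up values (hypothetical fx/fy, a four-pixel depth patch, identity view matrix), so it illustrates the pipeline rather than the real sensor setup.

```python
# Hypothetical numbers for illustration only; in the repo these come from the
# sensor's depth_film_coords, depth_proj_mat, and the robot's view matrix.
import numpy as np

fx, fy = 2.0, 2.0  # intrinsics, i.e. proj_matrix[0, 0] and proj_matrix[1, 1]
film_coords = np.array([[-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5], [0.5, 0.5]])  # normalized film-plane xy
depth = np.array([[0.31], [0.32], [0.33], [0.34]])  # one depth reading per pixel, in meters

# Scale film coords by depth, divide by the intrinsics, then append z and a homogeneous 1.
cam_coords = np.divide(np.multiply(film_coords, depth), [fx, fy])
cam_coords = np.concatenate((cam_coords, depth, np.ones((depth.shape[0], 1))), axis=1)

# World points come from the inverse of the (world-to-camera) view matrix.
view_matrix = np.identity(4)
world_coords = (np.linalg.inv(view_matrix) @ cam_coords.T).T
print(world_coords[:, :3])
```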