formatting

lukestroh committed Nov 26, 2024
1 parent e4d4e49 commit 471545f
Showing 9 changed files with 92 additions and 107 deletions.
15 changes: 6 additions & 9 deletions main.py
@@ -15,21 +15,18 @@

def main():
pbutils = PyBUtils(renders=True)

robot = Robot(
pbclient=pbutils.pbclient, position=[0, 1, 0], orientation=[0, 0, 0, 1]
)

robot = Robot(pbclient=pbutils.pbclient, position=[0, 1, 0], orientation=[0, 0, 0, 1])

penv = PruningEnv(
pbutils=pbutils,
verbose=True,
)

_1_inch = 0.0254
penv.activate_shape(shape="cylinder", radius=_1_inch * 2 , height=2.85, orientation=[0, np.pi / 2, 0])
penv.activate_shape(shape="cylinder", radius=_1_inch * 2, height=2.85, orientation=[0, np.pi / 2, 0])
# penv.activate_shape(shape="cylinder", radius=0.01, height=2.85, orientation=[0, np.pi / 2, 0])



# penv.load_tree(
# pbutils=pbutils,
# scale=1.0,
@@ -48,7 +45,7 @@ def main():
time.sleep(0.1)

# log.debug(robot.sensors['tof0'].tf_frame)

while True:
try:
# log.debug(f"{robot.sensors['tof0']}")
@@ -62,7 +59,7 @@ def main():
)
# tof0_view_matrix = np.asarray(tof0_view_matrix).reshape((4, 4), order="F")
# log.debug(f"{tof0_view_matrix[:3, 3]}")

keys_pressed = penv.get_key_pressed()
action = penv.get_key_action(robot=robot, keys_pressed=keys_pressed)
action = action.reshape((6, 1))
1 change: 1 addition & 0 deletions pybullet_tree_sim/camera.py
@@ -55,6 +55,7 @@ def __init__(self, pbclient, sensor_name: str, sensor_type: str = "camera") -> None:

def main():
import pprint as pp

pbutils = PyBUtils(renders=False)
camera = Camera(pbutils, sensor_name="realsense_d435i")
pp.pprint(camera.depth_pixel_coords)
3 changes: 2 additions & 1 deletion pybullet_tree_sim/config/description/tof/vl6180.yaml
@@ -3,6 +3,7 @@ data_type: "d"
depth:
dfov: 25.0
near_plane: 0.002
far_plane: 0.150
# far_plane: 0.150
far_plane: 0.40
width: 1
height: 1
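For reference, a description YAML like this is usually consumed with PyYAML; the snippet below is only a sketch of reading the updated far_plane, not the project's actual config loader, and it assumes the script runs from the repository root and that width/height sit under the depth block as the diff suggests.

```python
# Minimal sketch (assumed usage, not the project's loader): read the ToF
# description and inspect the depth parameters touched by this commit.
import yaml

with open("pybullet_tree_sim/config/description/tof/vl6180.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["depth"]["near_plane"])                      # 0.002
print(cfg["depth"]["far_plane"])                       # 0.4 after this commit (was 0.15)
print(cfg["depth"]["width"], cfg["depth"]["height"])   # 1 1
```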
4 changes: 2 additions & 2 deletions pybullet_tree_sim/config/runtime/mock_pruner.yaml
@@ -8,14 +8,14 @@ sensors:
tilt: 0.0
tof0:
type: tof
name: vl53l8cx
name: vl6180
# x_offset: "0.02"
tf_frame: "tof0"
pan: 0.0
tilt: 0.0
tof1:
type: tof
name: vl53l8cx
name: vl6180
# x_offset: "-0.02"
tf_frame: "tof1"
pan: 0.0
23 changes: 9 additions & 14 deletions pybullet_tree_sim/plot.py
@@ -3,22 +3,25 @@
import modern_robotics as mr
from zenlog import log
import numpy as np
import pprint as pp


def debug_sensor_world_data(data):
pp.pprint(data)
fig = go.Figure()
for tof_name, tof_data in data.items():
# log.warn(tof_data)
fig.add_trace(
go.Scatter3d(
x=tof_data['data'][:, 0],
y=tof_data['data'][:, 1],
z=tof_data['data'][:, 2],
x=tof_data["data"][:, 0],
y=tof_data["data"][:, 1],
z=tof_data["data"][:, 2],
mode="markers",
marker=dict(size=2),
name=tof_name,
)
)
inv_view_matrix = mr.TransInv(tof_data['view_matrix'])
inv_view_matrix = mr.TransInv(tof_data["view_matrix"])
fig.add_trace(
go.Scatter3d(
x=[inv_view_matrix[0, 3]],
@@ -29,7 +32,7 @@ def debug_sensor_world_data(data):
marker=dict(size=5),
)
)

fig.update_layout(
title=f"World Data",
scene=dict(
@@ -46,17 +49,12 @@ def debug_sensor_world_data(data):
)
fig.show()
return



def debug_deproject_pixels_to_points(sensor, data, cam_coords, world_coords, view_matrix):

hovertemplate = "id: %{id}<br>x: %{x}<br>y: %{y}<br>z: %{z}<extra></extra>"

# _data = data.reshape([sensor.depth_width, sensor.depth_height], order="F")
# _data = _data.reshape((sensor.depth_width * sensor.depth_height, 1), order="C")
print(np.array(list(range(8)) * 8).reshape((8, 8), order="C").flatten(order="F"))
np.array(list(range(8)) * 8)
fig = go.Figure(
data=[
go.Scatter3d(
@@ -65,9 +63,6 @@ def debug_deproject_pixels_to_points(sensor, data, cam_coords, world_coords, view_matrix):
# y=np.array([list(range(8))] * 8).T.flatten(order="F"),
z=data.flatten(),
mode="markers",
# ids=np.array([f"{i}" for i in range(sensor.depth_width * sensor.depth_height)])
# .reshape((8, 8), order="C")
# .flatten(order="F"),
ids=[f"{i}" for i in range(sensor.depth_width * sensor.depth_height)],
hovertemplate=hovertemplate,
)
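The commented-out reshape/flatten lines and the print above are wrestling with row-major ("C") versus column-major ("F") ordering of the 8×8 ToF zone ids; a small self-contained check (synthetic ids only, no sensor required) shows what that reordering does.

```python
# Minimal sketch: how C-order vs F-order reshape/flatten shuffles an 8x8 id grid.
import numpy as np

ids = np.arange(64)                        # one id per ToF zone, 0..63
grid_c = ids.reshape((8, 8), order="C")    # rows filled first: row 0 is 0..7
grid_f = ids.reshape((8, 8), order="F")    # columns filled first: col 0 is 0..7

print(grid_c[0])                           # [0 1 2 3 4 5 6 7]
print(grid_f[:, 0])                        # [0 1 2 3 4 5 6 7]
print(grid_c.flatten(order="F")[:8])       # [ 0  8 16 24 32 40 48 56], the transpose effect
```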
@@ -90,7 +85,7 @@ def debug_deproject_pixels_to_points(sensor, data, cam_coords, world_coords, view_matrix):
go.Scatter3d(
x=sensor.depth_film_coords[:, 0],
y=sensor.depth_film_coords[:, 1],
z= data.flatten(order="F"),
z=data.flatten(order="F"),
mode="markers",
ids=[f"{i}" for i in range(sensor.depth_width * sensor.depth_height)],
hovertemplate=hovertemplate,
56 changes: 20 additions & 36 deletions pybullet_tree_sim/pruning_environment.py
@@ -346,8 +346,8 @@ def deproject_pixels_to_points(
# https://stackoverflow.com/questions/4124041/is-opengl-coordinate-system-left-handed-or-right-handed
# https://github.com/bitlw/LearnProjMatrix/blob/main/doc/OpenGL_Projection.md#introduction
# view_matrix[1:3, :] = -view_matrix[1:3, :]
#
#

proj_matrix = np.asarray(camera.depth_proj_mat).reshape([4, 4], order="F")
# log.warning(f'{proj_matrix}')
# proj_matrix = camera.depth_proj_mat
@@ -358,14 +358,14 @@
# Get camera intrinsics from projection matrix. If square camera, these should be the same.
fx = proj_matrix[0, 0]
fy = proj_matrix[1, 1] # if square camera, these should be the same

# Get camera coordinates from film-plane coordinates. Scale, add z (depth), then homogenize the matrix.
cam_coords = np.divide(np.multiply(camera.depth_film_coords, data), [fx, fy])
cam_coords = np.concatenate((cam_coords, data, np.ones((camera.depth_width * camera.depth_height, 1))), axis=1)

if return_frame.strip().lower() == "camera":
return cam_coords

log.warning(f"view_matrix:\n{view_matrix}")
world_coords = (mr.TransInv(view_matrix) @ cam_coords.T).T
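The two lines above are the core of the pinhole back-projection: scale the film-plane coordinates by depth and the focal terms, homogenize, then map through the inverse view matrix. A self-contained sketch with made-up intrinsics and pose (not the class's real attributes) illustrates the same math.

```python
# Minimal sketch of the back-projection above, using synthetic values.
import numpy as np

fx, fy = 1.5, 1.5                                    # focal terms from the projection matrix diagonal
film = np.array([[-0.5, -0.5],                       # normalized film-plane coords, one row per pixel
                 [ 0.5,  0.5]])
depth = np.array([[0.32], [0.34]])                   # per-pixel depth in meters

cam = film * depth / np.array([fx, fy])              # scale by depth, divide by focal lengths
cam = np.hstack([cam, depth, np.ones_like(depth)])   # homogeneous camera-frame points (x, y, z, 1)

view = np.eye(4)
view[:3, 3] = [0.0, 0.0, -1.0]                       # assumed world-to-camera transform
world = (np.linalg.inv(view) @ cam.T).T              # same result as mr.TransInv(view) for rigid transforms
print(world[:, :3])                                  # points expressed in the world frame
```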

@@ -514,31 +514,22 @@ def get_key_action(self, robot: Robot, keys_pressed: list) -> np.ndarray:
log.warning(f"button p pressed")
for name, sensor in robot.sensors.items():
# Get view and projection matrices
if name.startswith('tof'):
if name.startswith("tof"):
log.error(name)
view_matrix = robot.get_view_mat_at_curr_pose(camera=sensor)
# log.error(view_matrix)

rgb, depth = self.pbutils.get_rgbd_at_cur_pose(
camera=sensor, type="robot", view_matrix=view_matrix
)
view_matrix = np.asarray(view_matrix).reshape([4, 4], order="F")
view_matrix = np.asarray(view_matrix).reshape([4, 4], order="F")
depth = depth.reshape((sensor.depth_width * sensor.depth_height, 1), order="F")

world_points = self.deproject_pixels_to_points(
camera=sensor,
data=depth,
view_matrix=view_matrix,
return_frame='world',
debug=False
camera=sensor, data=depth, view_matrix=view_matrix, return_frame="world", debug=False
)

data.update({name: {
'data': world_points,
'view_matrix': view_matrix,
'sensor': sensor
}
})

data.update({name: {"data": world_points, "view_matrix": view_matrix, "sensor": sensor}})
self.last_button_push_time = time.time()
plot.debug_sensor_world_data(
data=data,
@@ -557,7 +548,7 @@ def get_key_action(self, robot: Robot, keys_pressed: list) -> np.ndarray:
pass

else:
action = np.array([0.0, 0.0, 0, 0.0, 0.0, 0.0])
action = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
keys_pressed = []
return action

@@ -567,7 +558,7 @@ def main():
from pybullet_tree_sim.utils.pyb_utils import PyBUtils
import numpy as np
import secrets

pbutils = PyBUtils(renders=False)
penv = PruningEnv(pbutils=pbutils)
tof0 = TimeOfFlight(pbclient=pbutils.pbclient, sensor_name="vl53l8cx")
@@ -577,27 +568,20 @@ def main():
# data[0,0] = 0.31
depth_data[:, :] = tuple(generator.uniform(0.31, 0.35, (tof0.depth_width, tof0.depth_height)))


start = 0.31
stop = 0.35
# Depth data IRL comes in as a C-format nx1 array. start with this IRL
depth_data[:, 3:5] = np.array([np.arange(start, stop, (stop - start) / 8), np.arange(start, stop, (stop - start) / 8)]).T
depth_data[:, 3:5] = np.array(
[np.arange(start, stop, (stop - start) / 8), np.arange(start, stop, (stop - start) / 8)]
).T
depth_data[-1, 3] = 0.31
# Switch to F-format
depth_data = depth_data.reshape((tof0.depth_width * tof0.depth_height, 1), order="F")

view_matrix = np.identity(4)
view_matrix[:3, 3] = -1 * np.array([0,0,1])

world_points = penv.deproject_pixels_to_points(
camera=tof0,
data=depth_data,
view_matrix=view_matrix,
debug=True
)



view_matrix[:3, 3] = -1 * np.array([0, 0, 1])

world_points = penv.deproject_pixels_to_points(camera=tof0, data=depth_data, view_matrix=view_matrix, debug=True)

# log.warning(f"joint angles: {penv.ur5.get_joint_angles()}")
