[WIP] GPS + Compass #1
base: eundersander/gala_kinematic
@@ -13,13 +13,19 @@
from gym.spaces import Box
import numpy as np
import quaternion
from gym import spaces
from habitat.utils import profiling_wrapper
from collections import OrderedDict

import torch  # isort:skip # noqa: F401 must import torch before importing bps_pytorch

from habitat.utils.geometry_utils import (
    quaternion_rotate_vector,
)
from habitat.tasks.utils import cartesian_to_polar


class BatchedEnv:
    r"""Todo
    """

@@ -46,6 +52,10 @@ def __init__(
        include_depth = "DEPTH_SENSOR" in config.SENSORS
        include_rgb = "RGB_SENSOR" in config.SENSORS
        # include_gps = "GPS_SENSOR" in config.SENSORS
        # include_compass = "COMPASS_SENSOR" in config.SENSORS
        self.include_point_goal_gps_compass = "POINTGOAL_WITH_GPS_COMPASS_SENSOR" in config.SENSORS
        gps_compass_sensor_shape = 4
        assert include_depth or include_rgb

        self._num_envs = config.NUM_ENVIRONMENTS

@@ -106,6 +116,12 @@ def __init__(
        else:
            observations["rgb"] = torch.rand([self._num_envs, sensor_height, sensor_width, 3], dtype=torch.float32) * 255
            observations["depth"] = torch.rand([self._num_envs, sensor_height, sensor_width, 1], dtype=torch.float32) * 255
        # if include_gps:
        #     observations["gps"] = torch.empty([self._num_envs, 3], dtype=torch.float32)
        # if include_compass:
        #     observations["compass"] = torch.empty([self._num_envs, 3], dtype=torch.float32)
        if self.include_point_goal_gps_compass:
            observations["goal_gps_compass"] = torch.empty([self._num_envs, gps_compass_sensor_shape], dtype=torch.float32)
        self._observations = observations

        # print('observations["rgb"].shape: ', observations["rgb"].shape)
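For illustration only (not part of the diff): the new observation is just a preallocated [num_envs, 4] float32 buffer that get_nonpixel_observations later overwrites in place each step. A minimal standalone sketch of the same allocation, with made-up values:

import torch

num_envs = 512                 # matches NUM_ENVIRONMENTS in the config change below
gps_compass_sensor_shape = 4   # x, y, z, yaw in this WIP version

# Allocated once in __init__, then filled per environment every step.
goal_gps_compass = torch.empty(
    [num_envs, gps_compass_sensor_shape], dtype=torch.float32
)
b = 0  # environment index
goal_gps_compass[b, 0:3] = torch.tensor([1.0, 0.5, -2.0])  # robot_position (world frame, made-up)
goal_gps_compass[b, 3] = 0.75                              # robot_yaw in radians (made-up)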

@@ -144,6 +160,15 @@ def __init__(
                dtype=np.float32,
            )
            obs_dict["depth"] = depth_obs
        # if include_gps:
        # if include_compass:
        if self.include_point_goal_gps_compass:
            obs_dict["goal_gps_compass"] = spaces.Box(
                low=0.0,
                high=np.inf,  # todo: investigate gps/compass min/max (position and yaw can be negative)
                shape=(gps_compass_sensor_shape,),
                dtype=np.float32,
            )

        self.observation_spaces = [obs_dict] * 1  # config.NUM_ENVIRONMENTS  # note we only ever read element #0 of this array
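A side note on the todo above (a sketch only, not something this PR changes): world-space positions and yaw can be negative, so if the buffer keeps holding raw position + yaw, the bounds probably want to be symmetric, e.g.:

import numpy as np
from gym import spaces

gps_compass_sensor_shape = 4
# Assumed bounds: unbounded in both directions rather than [0, inf).
goal_gps_compass_space = spaces.Box(
    low=-np.inf,
    high=np.inf,
    shape=(gps_compass_sensor_shape,),
    dtype=np.float32,
)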

@@ -156,6 +181,7 @@ def __init__(
        # self.number_of_episodes = []
        self._paused: List[int] = []


    @property
    def num_envs(self):
        r"""number of individual environments."""

@@ -183,10 +209,25 @@ def get_metrics(self):
        return results

    def get_nonpixel_observations(self, env_states, observations):
-        for state in env_states:
-            robot_pos = state.robot_position
-            robot_yaw = state.robot_yaw
-            # todo: update observations here
+        # TODO: update observations here
+        for (b, state) in enumerate(env_states):
+            if self.include_point_goal_gps_compass:
+                robot_pos = state.robot_position
+                robot_yaw = state.robot_yaw
+
+                # direction_vector = state.goal_pos - robot_pos
+                # source_rotation = quaternion.quaternion(0, 0, 0, 0)  # TODO: get actual rotation
+                # direction_vector_agent = quaternion_rotate_vector(
+                #     source_rotation.inverse(), direction_vector
+                # )
+                # rho, phi = cartesian_to_polar(
+                #     -direction_vector_agent[2], direction_vector_agent[0]
+                # )
+                observations["goal_gps_compass"][b, 0] = robot_pos[0]
+                observations["goal_gps_compass"][b, 1] = robot_pos[1]
+                observations["goal_gps_compass"][b, 2] = robot_pos[2]
+                observations["goal_gps_compass"][b, 3] = robot_yaw

Review comment: Andrew, does this look good to you? Thinking about how you'll invoke this policy for sim2sim.

Reply: Yes, I can provide this info in Hab2.0 as well. We definitely will need joint positions and end-effector position here as well eventually.


    def get_dones_and_rewards_and_fix_actions(self, env_states, actions):

@@ -239,6 +280,7 @@ def async_step(
            env_states = self._bsim.get_environment_states()
            # todo: decide if Python gets a copy of env_states vs direct access to C++ memory,
            # and then decide whether to start async physics step *before* processing env_states
            self.get_nonpixel_observations(env_states, self._observations)
            actions_flat_list = self.get_dones_and_rewards_and_fix_actions(env_states, actions_flat_list)
            self._bsim.start_async_step_physics(actions_flat_list)
        else:
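To show where this lands in the overall loop (a hedged sketch; the reset call, the policy call, and num_steps are placeholders, not APIs confirmed by this diff): async_step consumes the previous frame's env_states for the non-pixel observations, dones, and rewards, then kicks off the next physics step, and wait_step later blocks on the renderer.

# Hypothetical driver loop; only async_step and wait_step appear in this diff.
envs = BatchedEnv(config=config)   # assumed constructor
observations = envs.reset()        # assumed reset API
num_steps = 100                    # arbitrary
for _ in range(num_steps):
    actions = policy(observations)   # placeholder policy call
    # Fills goal_gps_compass / dones / rewards from the previous frame's
    # env_states, then starts the next physics step asynchronously.
    envs.async_step(actions)
    # Blocks until rendering finishes; rgb/depth and the non-pixel
    # observations above all describe the same (previous) frame.
    observations = envs.wait_step()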

@@ -254,6 +296,7 @@ def wait_step(self) -> List[Any]:

        # this updates self._observations["depth"] (and rgb) tensors
        # perf todo: ensure we're getting here before rendering finishes (issue a warning otherwise)

        self._bsim.wait_for_frame()

        # these are "one frame behind" like the observations (i.e. computed from

@@ -1,5 +1,5 @@
TENSORBOARD_DIR: "/checkpoint/eundersander/gala_kinematic/tb/gala_kinematic_ddppo" | ||
CHECKPOINT_FOLDER: "/checkpoint/eundersander/gala_kinematic/ckpt/gala_kinematic_ddppo" | ||
TENSORBOARD_DIR: "data/tb/gala_kinematic_ddppo" | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. On the FAIR cluster, I guess we aren't supposed to be writing/checkpointing to our home directory (the disk write IO is kinda slow or something). I wish I knew the correct way to do this. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think writing to /checkpoint is correct. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Ok. Do you wanna revise this line before we merge this PR? I wish we could do |
||
CHECKPOINT_FOLDER: "data/ckpt/gala_kinematic_ddppo" | ||
VIDEO_DIR: "../videos" | ||
REWARD_SCALE: 0.01 | ||
NUM_CHECKPOINTS: 0 | ||
|

@@ -8,6 +8,7 @@ OVERLAP_PHYSICS: True
SAVE_VIDEOS_INTERVAL: 500
NUM_UPDATES: 61
NUM_ENVIRONMENTS: 512
SENSORS: ["DEPTH_SENSOR", "RGB_SENSOR", "POINTGOAL_WITH_GPS_COMPASS_SENSOR"]
SIMULATOR:
  AGENTS: ['AGENT_0']
  AGENT_0:

Review comment: We should use the tidy_house_10k_1k dataset. That is a very old dataset and does not support proper goal placements.

Reply: This file is not used by Galakhtic. Still, we should probably rebase Gala's branch of hab-lab to something near latest (we'll do this in a separate PR).