[ros] automated generation of image and tf publishers from a yml / rosparam file. todo: camera_info
madratman committed Apr 27, 2019
1 parent cfcf3cf commit 76cd5d9
Showing 5 changed files with 128 additions and 92 deletions.
36 changes: 24 additions & 12 deletions ros/src/airsim_ros_pkgs/include/airsim_ros_wrapper.h
@@ -6,14 +6,16 @@ STRICT_MODE_OFF //todo what does this do?
#include "rpc/rpc_error.h"
STRICT_MODE_ON

#include "common/AirSimSettings.hpp"
#include "common/common_utils/FileSystem.hpp"
#include "nodelet/nodelet.h"
#include "ros/ros.h"
#include "vehicles/multirotor/api/MultirotorRpcLibClient.hpp"
#include "yaml-cpp/yaml.h"
#include <airsim_ros_pkgs/GimbalAngleEulerCmd.h>
#include <airsim_ros_pkgs/GimbalAngleQuatCmd.h>
#include <airsim_ros_pkgs/VelCmd.h>
#include <airsim_ros_pkgs/GPSYaw.h>
#include <airsim_ros_pkgs/VelCmd.h>
#include <chrono>
#include <cv_bridge/cv_bridge.h>
#include <geometry_msgs/PoseStamped.h>
@@ -22,24 +24,23 @@ STRICT_MODE_ON
#include <image_transport/image_transport.h>
#include <iostream>
#include <math.h>
#include <math_common.h>
#include <mavros_msgs/State.h>
#include <nav_msgs/Odometry.h>
#include "nodelet/nodelet.h"
#include <opencv2/opencv.hpp>
#include <ros/console.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/distortion_models.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/image_encodings.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/NavSatFix.h>
#include <std_srvs/Empty.h>
#include <tf2/LinearMath/Matrix3x3.h>
#include <tf2/LinearMath/Quaternion.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#include <tf2_ros/transform_broadcaster.h>
#include <tf2_ros/transform_listener.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#include <math_common.h>
#include <unordered_map>

// todo move airlib typedefs to separate header file?
@@ -131,10 +132,12 @@ class AirsimROSWrapper : public nodelet::Nodelet
/// camera helper methods
// TODO migrate to image_tranport camera publisher https://answers.ros.org/question/278602/how-to-use-camera_info_manager-to-publish-camera_info/
void process_and_publish_img_response(const std::vector<ImageResponse>& img_response);
sensor_msgs::ImagePtr get_img_msg_from_response(const ImageResponse& img_response);
sensor_msgs::ImagePtr get_img_msg_from_response(const ImageResponse& img_response, const ros::Time curr_ros_time, const std::string frame_id);
sensor_msgs::ImagePtr get_depth_img_msg_from_response(const ImageResponse& img_response, const ros::Time curr_ros_time, const std::string frame_id);
cv::Mat manual_decode_depth(const ImageResponse &img_response);
void read_params_from_yaml_and_fill_cam_info_msg(const std::string& file_name, sensor_msgs::CameraInfo& cam_info);
void convert_yaml_to_simple_mat(const YAML::Node& node, SimpleMatrix& m); // todo ugly
void generate_img_request_vec_and_ros_pubs_from_sensors_yml();

/// utils. parse into an Airlib<->ROS conversion class
tf2::Quaternion get_tf2_quat(const msr::airlib::Quaternionr& airlib_quat);
@@ -147,7 +150,7 @@ class AirsimROSWrapper : public nodelet::Nodelet
sensor_msgs::Imu get_ground_truth_imu_msg_from_airsim_state(const msr::airlib::MultirotorState &drone_state);

private:
bool is_vulkan_; // rosparam obtained from launch file. If vulkan is being used, we need to decode the image
bool is_vulkan_; // rosparam obtained from launch file. If vulkan is being used, we use BGR encoding instead of RGB

msr::airlib::MultirotorRpcLibClient airsim_client_;
msr::airlib::MultirotorState curr_drone_state_;
@@ -175,6 +178,8 @@ class AirsimROSWrapper : public nodelet::Nodelet
tf2_ros::TransformBroadcaster tf_broadcaster_;
tf2_ros::Buffer tf_buffer_;
std::unordered_map<std::string, std::string> cam_name_to_cam_tf_name_map_;
// lookup vector of all capture types in "camera major" format; used to give the camera TFs their names
std::vector<std::string> image_types_names_vec_;

/// ROS params
double vel_cmd_duration_;
@@ -189,11 +194,18 @@ class AirsimROSWrapper : public nodelet::Nodelet
// sensor_msgs::CameraInfo front_center_mono_cam_info_msg_;

/// ROS camera publishers
// image_transport::ImageTransport it_;
image_transport::Publisher front_left_img_raw_pub_;
image_transport::Publisher front_right_img_raw_pub_;
image_transport::Publisher front_left_depth_planar_pub_;
image_transport::Publisher front_center_img_raw_pub_;

// map of camera names to image types to publish to ROS.
// We obtain this from the cameras field in sensors.yaml, which is supplied by the end user.
// The camera names and image types must be a subset of (or equal to) what is declared in settings.json.
XmlRpc::XmlRpcValue camera_name_image_type_list_;

// generated from camera_name_image_type_list_
std::vector<ImageRequest> airsim_img_request_;

// auto-generated from camera_name_image_type_list_, which is populated from sensors.yaml
std::vector<image_transport::Publisher> image_pub_vec_;

ros::Publisher front_left_cam_info_pub_;
ros::Publisher front_right_cam_info_pub_;

10 changes: 8 additions & 2 deletions ros/src/airsim_ros_pkgs/launch/airsim_node.launch
@@ -1,6 +1,12 @@
<launch>
<node pkg="nodelet" type="nodelet" name="airsim_nodelet_manager" args="manager"/>
<node pkg="nodelet" type="nodelet" name="airsim_nodelet" args="load airsim_ros_pkgs/airsim_ros_nodelet airsim_nodelet_manager">

<!-- A map of cameras and image types you want to publish to ROS -->
<!-- This MUST be a subset of cameras and image types in your settings.json! -->
<rosparam command="load" file="$(find airsim_ros_pkgs)/params/sensors.yaml" />

<node pkg="nodelet" type="nodelet" name="airsim_nodelet_manager" args="manager" output="screen" />
<node pkg="nodelet" type="nodelet" name="airsim_nodelet" args="load airsim_ros_pkgs/airsim_ros_nodelet airsim_nodelet_manager" output="screen">

<!-- ROS timer rates. Note that each timer callback is processed at the maximum possible rate, upper-bounded by the following ROS params -->
<param name="is_vulkan" type="bool" value="false" />
<param name="update_airsim_img_response_every_n_sec" type="double" value="0.01" />
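For reference, here is a minimal sketch of how a rate parameter like update_airsim_img_response_every_n_sec typically drives the image timer callback declared in this diff. This is standard roscpp usage, not the committed code; the timer variable name is illustrative.

    // Hedged sketch: read the private rate param and create the timer that fires img_response_timer_cb.
    double update_airsim_img_response_every_n_sec = 0.01; // default mirrors the launch file above
    nh_private_.getParam("update_airsim_img_response_every_n_sec", update_airsim_img_response_every_n_sec);
    ros::Timer airsim_img_response_timer = nh_private_.createTimer(
        ros::Duration(update_airsim_img_response_every_n_sec),
        &AirsimROSWrapper::img_response_timer_cb, this);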
3 changes: 2 additions & 1 deletion ros/src/airsim_ros_pkgs/launch/depth_to_pointcloud.launch
@@ -10,7 +10,8 @@
<!-- Nodelets! -->
<!-- Convert it into a point cloud -->
<!-- <node pkg="nodelet" type="nodelet" name="airsim_depth2cloud" args="load depth_image_proc/point_cloud_xyzrgb"> -->
<node pkg="nodelet" type="nodelet" name="airsim_depth2cloud" args="load depth_image_proc/point_cloud_xyzrgb airsim_nodelet_manager --no-bond">
<node pkg="nodelet" type="nodelet" name="airsim_depth2cloud" output="screen"
args="load depth_image_proc/point_cloud_xyzrgb airsim_nodelet_manager --no-bond">
<remap from="depth_registered/image_rect" to="/front/left/depth_planar"/>
<remap from="depth_registered/points" to="/front/left/depth_planar_registered/points"/>
<remap from="rgb/image_rect_color" to="/front/left/image_raw"/>
4 changes: 4 additions & 0 deletions ros/src/airsim_ros_pkgs/params/sensors.yaml
@@ -0,0 +1,4 @@
cameras:
front_left_custom: [Scene, DepthPlanner]
front_right_custom: [Scene]
front_center_custom: [Scene]
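The committed sensors.yaml only requests Scene and DepthPlanner captures. Hypothetically (this is an illustration, not part of the commit), any of the other image types handled in airsim_ros_wrapper.cpp could be listed the same way, as long as the cameras and types also exist in settings.json:

    cameras:
      front_left_custom: [Scene, DepthPlanner, Segmentation]
      front_right_custom: [Scene, DepthPerspective]
      front_center_custom: [Scene, Infrared]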
167 changes: 90 additions & 77 deletions ros/src/airsim_ros_pkgs/src/airsim_ros_wrapper.cpp
@@ -17,15 +17,6 @@ void AirsimROSWrapper::onInit()
initialize_airsim();
initialize_ros();
in_air_ = false;

cam_name_to_cam_tf_name_map_["front_right_custom"] = "front/right";
cam_name_to_cam_tf_name_map_["front_left_custom"] = "front/left";
cam_name_to_cam_tf_name_map_["front_center_custom"] = "front_center";

// todo parse into a common tf_utils
// quat_world_ned_to_world_enu.setRPY(r,p,y);
// quat_world_ned_to_world_enu = quat_world_ned_to_world_enu.inverse();

// initialize placeholder control commands
// vel_cmd_ = VelCmd();
// gimbal_cmd_ = GimbalCmd();
@@ -60,15 +51,13 @@ void AirsimROSWrapper::initialize_ros()
double update_airsim_img_response_every_n_sec;// = 0.0001;
double update_airsim_control_every_n_sec;// = 0.01;

front_center_img_raw_pub_ = it.advertise("front_center/image_raw", 1);
front_left_img_raw_pub_ = it.advertise("front/left/image_raw", 1);
front_right_img_raw_pub_ = it.advertise("front/right/image_raw", 1);
front_left_depth_planar_pub_ = it.advertise("front/left/depth_planar", 1);

// todo enforce dynamics constraints in this node as well?
// nh_.getParam("max_vert_vel_", max_vert_vel_);
// nh_.getParam("max_horz_vel", max_horz_vel_);

nh_private_.getParam("/cameras", camera_name_image_type_list_);
generate_img_request_vec_and_ros_pubs_from_sensors_yml();

nh_private_.getParam("is_vulkan", is_vulkan_);
nh_private_.getParam("front_left_calib_file", front_left_calib_file_);
nh_private_.getParam("front_right_calib_file", front_right_calib_file_);
@@ -371,25 +360,56 @@ void AirsimROSWrapper::drone_state_timer_cb(const ros::TimerEvent& event)
has_gimbal_cmd_ = false;
}

// XmlRpc::XmlRpcValue can't be const in this case
void AirsimROSWrapper::generate_img_request_vec_and_ros_pubs_from_sensors_yml()
{
airsim_img_request_.clear();
image_pub_vec_.clear();
image_types_names_vec_.clear();

image_transport::ImageTransport image_transporter(nh_);

// iterate over requested cameras
for (auto it = camera_name_image_type_list_.begin(); it != camera_name_image_type_list_.end(); it++)
{
std::string curr_camera_name = std::string(it->first);
cam_name_to_cam_tf_name_map_[curr_camera_name] = curr_camera_name + "_optical"; // tf name

// iterate over requested capture types
for (int image_type_idx = 0; image_type_idx < it->second.size(); image_type_idx++)
{
std::cout << "Initializing ROS publisher for camera name \"" << it->first <<
"\" with image type \"" << it->second[image_type_idx] << "\" " << std::endl;

// integer (uint8) image types
if (it->second[image_type_idx] == "Scene" ||
it->second[image_type_idx] == "Segmentation" ||
it->second[image_type_idx] == "SurfaceNormals" ||
it->second[image_type_idx] == "Infrared")
{
// append an ImageRequest to be sent to the AirSim client
airsim_img_request_.push_back(ImageRequest(curr_camera_name, ImageType::Scene, false, false));
}
else // float image types DepthPlanner || DepthPerspective || DepthVis || DisparityNormalized
{
airsim_img_request_.push_back(ImageRequest(curr_camera_name, ImageType::DepthPlanner, true));
}
// append a corresponding ROS publisher
image_pub_vec_.push_back(image_transporter.advertise(curr_camera_name + "/" + std::string(it->second[image_type_idx]), 1));
image_types_names_vec_.push_back(std::string(it->second[image_type_idx]));
}
}
}
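Note that the loop above always requests ImageType::Scene for the integer capture types and ImageType::DepthPlanner for the float ones, regardless of which name appears in the YAML. Below is a hedged sketch of a string-to-enum lookup that would forward the exact requested type; it is illustrative only and assumes the ImageType typedef used in this file exposes the enum entries named in the comments above.

    // Illustrative only, not part of this commit: map the YAML string to the AirSim image type.
    static const std::unordered_map<std::string, ImageType> string_to_image_type = {
        {"Scene", ImageType::Scene},
        {"Segmentation", ImageType::Segmentation},
        {"SurfaceNormals", ImageType::SurfaceNormals},
        {"Infrared", ImageType::Infrared},
        {"DepthPlanner", ImageType::DepthPlanner},
        {"DepthPerspective", ImageType::DepthPerspective},
        {"DepthVis", ImageType::DepthVis},
        {"DisparityNormalized", ImageType::DisparityNormalized}};

    // Usage inside the inner loop; float types need pixels_as_float = true.
    const std::string type_str = std::string(it->second[image_type_idx]);
    const bool pixels_as_float = (type_str == "DepthPlanner" || type_str == "DepthPerspective" ||
                                  type_str == "DepthVis" || type_str == "DisparityNormalized");
    if (pixels_as_float)
        airsim_img_request_.push_back(ImageRequest(curr_camera_name, string_to_image_type.at(type_str), true));
    else
        airsim_img_request_.push_back(ImageRequest(curr_camera_name, string_to_image_type.at(type_str), false, false));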

// the image request camera names should match the custom camera names in settings.json!
void AirsimROSWrapper::img_response_timer_cb(const ros::TimerEvent& event)
{
std::vector<ImageRequest> img_request = {
ImageRequest("front_left_custom", ImageType::Scene, false, false),
ImageRequest("front_right_custom", ImageType::Scene, false, false),
ImageRequest("front_left_custom", ImageType::DepthPlanner, true),
ImageRequest("front_center_custom", ImageType::Scene, false, false)
};

try
{
// std::cout << "AirsimROSWrapper::img_response_timer_cb\n";
const std::vector<ImageResponse>& img_response = airsim_client_.simGetImages(img_request);
const std::vector<ImageResponse>& img_response = airsim_client_.simGetImages(airsim_img_request_);

if (img_response.size() == img_request.size())
if (img_response.size() == airsim_img_request_.size())
{

// std::cout << "publishing now \n";
process_and_publish_img_response(img_response);
}
}
@@ -413,83 +433,76 @@ cv::Mat AirsimROSWrapper::manual_decode_depth(const ImageResponse &img_response)
return mat;
}
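The body of manual_decode_depth is collapsed in this view. For context, here is a minimal sketch of what such a decode typically does, assuming image_data_float is a row-major height-by-width buffer; the committed implementation may differ, and the function name below is hypothetical.

    // Hedged sketch, not the collapsed body above: copy the float buffer into a CV_32FC1 cv::Mat.
    cv::Mat decode_depth_sketch(const ImageResponse& img_response)
    {
        cv::Mat mat(img_response.height, img_response.width, CV_32FC1);
        for (int row = 0; row < img_response.height; row++)
            for (int col = 0; col < img_response.width; col++)
                mat.at<float>(row, col) = img_response.image_data_float[row * img_response.width + col];
        return mat;
    }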

sensor_msgs::ImagePtr AirsimROSWrapper::get_img_msg_from_response(const ImageResponse& img_response)
// const ros::Time curr_ros_time,
// const std::string frame_id)
sensor_msgs::ImagePtr AirsimROSWrapper::get_img_msg_from_response(const ImageResponse& img_response,
const ros::Time curr_ros_time,
const std::string frame_id)
{
sensor_msgs::ImagePtr img_msg_ptr = boost::make_shared<sensor_msgs::Image>();
img_msg_ptr->data = img_response.image_data_uint8;
img_msg_ptr->step = img_response.width * 3; // todo un-hardcode. image_width*num_bytes
// img_msg_ptr->header.stamp = curr_ros_time;
// img_msg_ptr->header.frame_id = frame_id;
img_msg_ptr->header.stamp = curr_ros_time;
img_msg_ptr->header.frame_id = frame_id;
img_msg_ptr->height = img_response.height;
img_msg_ptr->width = img_response.width;
img_msg_ptr->encoding = "rgb8";
if (is_vulkan_)
img_msg_ptr->encoding = "bgr8";
img_msg_ptr->is_bigendian = 0;
return img_msg_ptr;
}
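As the todo above notes, step is hard-coded to width * 3. A hedged sketch of deriving it from the chosen encoding instead: numChannels and bitDepth are existing helpers in <sensor_msgs/image_encodings.h> (already included by this header set); the surrounding lines are illustrative, not the committed code.

    // Illustrative only: compute the row step in bytes from the encoding rather than hard-coding 3.
    const std::string encoding = is_vulkan_ ? "bgr8" : "rgb8";
    const int bytes_per_pixel = sensor_msgs::image_encodings::numChannels(encoding) *
                                (sensor_msgs::image_encodings::bitDepth(encoding) / 8);
    img_msg_ptr->encoding = encoding;
    img_msg_ptr->step = img_response.width * bytes_per_pixel;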

void AirsimROSWrapper::process_and_publish_img_response(const std::vector<ImageResponse>& img_response)
sensor_msgs::ImagePtr AirsimROSWrapper::get_depth_img_msg_from_response(const ImageResponse& img_response,
const ros::Time curr_ros_time,
const std::string frame_id)
{
// todo why is cv::imdecode not working
// #if CV_MAJOR_VERSION==3
// cv::Mat camera_0_img = cv::imdecode(img_response.at(0).image_data_uint8, cv::IMREAD_UNCHANGED);
// auto rgb_right = cv::imdecode(img_response.at(1).image_data_uint8, cv::IMREAD_COLOR);
// #else
// cv::Mat camera_0_img = cv::imdecode(img_response.at(0).image_data_uint8, CV_LOAD_IMAGE_COLOR);
// #endif

// decode images and convert to ROS image msgs
sensor_msgs::ImagePtr bgr_front_left_msg = get_img_msg_from_response(img_response.at(0));

sensor_msgs::ImagePtr bgr_front_right_msg = get_img_msg_from_response(img_response.at(1));

cv::Mat front_left_depth_planar = manual_decode_depth(img_response.at(2));
sensor_msgs::ImagePtr front_left_depth_planar_msg = cv_bridge::CvImage(std_msgs::Header(), "32FC1", front_left_depth_planar).toImageMsg();

sensor_msgs::ImagePtr bgr_front_center_msg = get_img_msg_from_response(img_response.at(3));

// put ros time now in headers.
// todo comply with standards! https://wiki.ros.org/image_pipeline/FrameConventions
// todo https://wiki.ros.org/image_pipeline/FrameConventions is not actually valid as depth_image_proc does look at frame_id of images!
// todo use airsim time if ros param /use_sim_time is set to true. also what the hell is airsim time in practice and should I use it?
ros::Time curr_ros_time = ros::Time::now();
bgr_front_left_msg->header.stamp = curr_ros_time;
bgr_front_left_msg->header.frame_id = cam_name_to_cam_tf_name_map_["front_left_custom"] + "_optical";
// todo using img_response.image_data_float directly as done in get_img_msg_from_response() throws an error,
// hence the dependency on opencv and cv_bridge. however, this is an extremely fast op, so no big deal.
cv::Mat depth_img = manual_decode_depth(img_response);
sensor_msgs::ImagePtr depth_img_msg = cv_bridge::CvImage(std_msgs::Header(), "32FC1", depth_img).toImageMsg();
depth_img_msg->header.stamp = curr_ros_time;
depth_img_msg->header.frame_id = cam_name_to_cam_tf_name_map_[img_response.camera_name];
return depth_img_msg;
}

bgr_front_right_msg->header.stamp = curr_ros_time;
bgr_front_right_msg->header.frame_id = cam_name_to_cam_tf_name_map_["front_right_custom"] + "_optical";
void AirsimROSWrapper::process_and_publish_img_response(const std::vector<ImageResponse>& img_response_vec)
{
ros::Time curr_ros_time = ros::Time::now(); // todo add option to use airsim time (image_response.TTimePoint) like Gazebo /use_sim_time param
int img_response_idx = 0;

// front left depth planar ground truth has the SAME transform as front left stereo!
front_left_depth_planar_msg->header.stamp = curr_ros_time;
front_left_depth_planar_msg->header.frame_id = cam_name_to_cam_tf_name_map_["front_left_custom"] + "_optical";
for (auto curr_img_response = img_response_vec.begin(); curr_img_response != img_response_vec.end(); curr_img_response++)
{
// todo publishing a tf for each capture type seems stupid. but it foolproofs us against render thread's async stuff, I hope.
// Ideally, we should loop over cameras and then captures, and publish only one tf.
publish_camera_tf(*curr_img_response, curr_ros_time, "world_ned", curr_img_response->camera_name + "/" + image_types_names_vec_[img_response_idx]);

bgr_front_center_msg->header.stamp = curr_ros_time;
bgr_front_center_msg->header.frame_id = cam_name_to_cam_tf_name_map_["front_center_custom"] + "_optical"; // todo same camera tf as monocular left?
if (curr_img_response->pixels_as_float) // DepthPlanner / DepthPerspective / DepthVis / DisparityNormalized
{
image_pub_vec_[img_response_idx].publish(get_depth_img_msg_from_response(*curr_img_response,
curr_ros_time,
cam_name_to_cam_tf_name_map_[curr_img_response->camera_name]));
}
else // Scene / Segmentation / SurfaceNormals / Infrared
{
image_pub_vec_[img_response_idx].publish(get_img_msg_from_response(*curr_img_response,
curr_ros_time,
cam_name_to_cam_tf_name_map_[curr_img_response->camera_name]));
}
img_response_idx++;
}

// update timestamp of saved cam info msgs
front_left_cam_info_msg_.header.stamp = curr_ros_time;
front_left_cam_info_msg_.header.frame_id = cam_name_to_cam_tf_name_map_["front_left_custom"] + "_optical";
front_right_cam_info_msg_.header.stamp = curr_ros_time;
front_right_cam_info_msg_.header.frame_id = cam_name_to_cam_tf_name_map_["front_right_custom"] + "_optical";

// publish camera transforms
// camera poses are obtained from airsim's client API which are in (local) NED frame
// todo make topic name a param. this should be the same as calib/*.yamls, or else the point cloud can't be viewed in rviz.
publish_camera_tf(img_response.at(0), curr_ros_time, "world_ned", cam_name_to_cam_tf_name_map_["front_left_custom"]);
publish_camera_tf(img_response.at(1), curr_ros_time, "world_ned", cam_name_to_cam_tf_name_map_["front_right_custom"]);
publish_camera_tf(img_response.at(3), curr_ros_time, "world_ned", cam_name_to_cam_tf_name_map_["front_center_custom"]);

// publish everything
front_right_img_raw_pub_.publish(bgr_front_right_msg);
front_center_img_raw_pub_.publish(bgr_front_center_msg);
front_left_img_raw_pub_.publish(bgr_front_left_msg);
front_left_depth_planar_pub_.publish(front_left_depth_planar_msg);
front_left_cam_info_pub_.publish(front_left_cam_info_msg_);
front_right_cam_info_pub_.publish(front_right_cam_info_msg_);
}
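The loop above relies on the responses coming back in the same order as airsim_img_request_, so index i of img_response_vec lines up with image_pub_vec_[i] and image_types_names_vec_[i]. A hypothetical guard (not part of the commit; requires <ros/assert.h>) that makes that assumption explicit:

    // Hypothetical sanity check: the request, publisher, and type-name vectors are built in
    // lockstep, so the response vector must match them one-to-one.
    ROS_ASSERT(img_response_vec.size() == airsim_img_request_.size());
    ROS_ASSERT(image_pub_vec_.size() == image_types_names_vec_.size());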

// todo cleanup api or add good description
// publish camera transforms
// camera poses are obtained from airsim's client API which are in (local) NED frame.
// We first do a change of basis to camera optical frame (Z forward, X right, Y down)
void AirsimROSWrapper::publish_camera_tf(const ImageResponse &img_response, const ros::Time &ros_time, const std::string &frame_id, const std::string &child_frame_id)
{
geometry_msgs::TransformStamped cam_tf_body_msg;
