From 57db6bf596529cefd7a53c8dcee85a089105c4ba Mon Sep 17 00:00:00 2001
From: Baptiste Manach
Date: Thu, 26 Jul 2018 14:56:24 +0200
Subject: [PATCH 1/2] Reformating the code parts of readme

---
 README.md | 534 ++++++++++++++++++++++++++++--------------------------
 1 file changed, 276 insertions(+), 258 deletions(-)

diff --git a/README.md b/README.md
index 998ad15..faa6ecc 100644
--- a/README.md
+++ b/README.md
@@ -1,261 +1,279 @@

# pySoRo

This software is for creating data of moving soft robots. The software uses multiple Intel RealSense sensors to capture the current state of a soft robot.

## Intel RealSense

In this project we use the librealsense Python API. One can clone the git repository from here:

https://github.com/IntelRealSense/librealsense

We did encounter a few Mac-specific challenges when working with this library.

### Third party libusb library

In the CMake settings a library named usb is added with the TARGET_LINK_LIBRARIES() command. This name refers to the libusb library that is bundled with librealsense and not the system libusb library. Unfortunately, CMake generates makefiles using the name usb, and that causes the makefiles to link against the system-installed libusb library. The solution we used is to rename the locally bundled library to XXusb. Here are the details of how we did that. In the file librealsense/CMakeLists.txt:

    if(NOT WIN32)
        target_link_libraries(realsense2 PRIVATE XXusb)
    elseif(FORCE_LIBUVC)
        target_link_libraries(realsense2 PRIVATE XXusb)
    endif()

Next, in the file librealsense/third-party/libusb/CMakeLists.txt, we changed the names as well:

    project(XXusb)
    ...
    add_library(XXusb STATIC ${LIBUSB_C} ${LIBUSB_H})
    ...
    if(WIN32)
        set_target_properties(XXusb PROPERTIES FOLDER "3rd Party")
    endif()

    if(APPLE)
        find_library(corefoundation_lib CoreFoundation)
        find_library(iokit_lib IOKit)
        TARGET_LINK_LIBRARIES(XXusb objc ${corefoundation_lib} ${iokit_lib})
    endif()
    ...

### Installing pyrealsense2

We mostly followed the description from the library:

https://github.com/IntelRealSense/librealsense/blob/master/doc/installation_osx.md

There are some slight changes to this description. We used MacPorts instead of brew. Hence, we wrote:

    sudo port install libusb
    sudo port install pkgconfig
    sudo port install glfw

In CMake one has to remember to turn on BUILD_PYTHON_BINDINGS to get the Python wrapper installed later on. Once CMake has generated your Xcode project files, build the install target from the command line as a sudo user. It all looks like this:

    mkdir build
    cd build
    cmake .. -DBUILD_PYTHON_BINDINGS=true -DBUILD_EXAMPLES=true -DBUILD_WITH_OPENMP=false -DHWM_OVER_XU=false -G Xcode
    sudo xcodebuild -target install

The install target will copy the final library files into the /usr/local/lib folder for you. To make sure your Python installation can find the new library, you might want to add the following to your .profile file:

    export PYTHONPATH=$PYTHONPATH:/usr/local/lib
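Once the library and the Python wrapper are installed, a quick smoke test is useful before wiring things into the application. The following is a minimal sketch, assuming a single RealSense camera is connected and the default stream profile is acceptable:

```
import pyrealsense2 as rs

# Open the default pipeline on the first connected camera and grab one frameset.
pipeline = rs.pipeline()
pipeline.start()
try:
    frames = pipeline.wait_for_frames()      # blocks until a frameset arrives
    depth = frames.get_depth_frame()
    print("depth frame:", depth.get_width(), "x", depth.get_height())
finally:
    pipeline.stop()
```

If the import fails, double-check that /usr/local/lib is on PYTHONPATH as described above.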
### Adding Two Dimensional Data Protocols

We ran profiling tools on the current implementation and found that close to 80% of the application time is spent on converting buffer data from librealsense into numpy arrays that are more appropriate for OpenGL vertex buffers.

Here is the code that is causing the bad performance:

    def update(self, coordinates, uvs):
        vertex_data = []
        index_data = []
        index = 0
        for i in range(len(coordinates)):
            if fabs(coordinates[i][2]) > 0.0:
                vertex_data.append(coordinates[i][0])
                vertex_data.append(coordinates[i][1])
                vertex_data.append(coordinates[i][2])
                vertex_data.append(uvs[i][0])
                vertex_data.append(uvs[i][1])
                index_data.append(index)
                index += 1
        vertex_array = np.array(vertex_data, dtype=np.float32)
        index_array = np.array(index_data, dtype=np.uint32)

        self.count = index

        glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
        glBufferSubData(GL_ARRAY_BUFFER, 0, vertex_array.nbytes, vertex_array)
        glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, index_array.nbytes, index_array)

It is not really the OpenGL calls at the end that are the problem, but rather that we set up a for-loop that incrementally creates two lists in order to zip two numpy arrays into one numpy array. This is slow. Ideally we would much rather write something like:

    vertex_array = np.hstack((coordinates, uvs))
    index_array = np.arange(len(vertex_array))
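For comparison, here is a vectorised sketch of the same update step. It is an illustration of the np.hstack idea above rather than code from the project, and it assumes that coordinates and uvs already arrive as (N, 3) and (N, 2) float32 arrays, which is exactly what the wrapper changes described below provide:

```
import numpy as np
from OpenGL.GL import (GL_ARRAY_BUFFER, GL_ELEMENT_ARRAY_BUFFER,
                       glBindBuffer, glBufferSubData)

def update_vectorised(self, coordinates, uvs):
    # Keep only the points with non-zero depth, mirroring the fabs(z) > 0.0 test.
    mask = np.abs(coordinates[:, 2]) > 0.0
    vertex_array = np.hstack((coordinates[mask], uvs[mask])).astype(np.float32)
    index_array = np.arange(len(vertex_array), dtype=np.uint32)

    self.count = len(index_array)

    glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
    glBufferSubData(GL_ARRAY_BUFFER, 0, vertex_array.nbytes, vertex_array)
    glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, index_array.nbytes, index_array)
```

The filtering, stacking and index generation all happen inside numpy, so the Python-level loop disappears entirely.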
Unfortunately, the current BufData implementation in the Python wrapper of librealsense does not give us numpy arrays coordinates and uvs with the right shape for writing the code above. With the current implementation, when one writes the Python code

    coordinates = np.asanyarray(points.get_vertices())

    print(type(coordinates[0]))
    print(coordinates.dtype)
    print(coordinates.shape)

then we get output such as this:

> [('f0', '<f4'), ('f1', '<f4'), ('f2', '<f4')] (307200,)

This is a little unexpected. We would much rather have the output:

> float32 (307200, 3)

This is a much more convenient data type to work with in Python. Hence, we made a few changes. In the Python bindings wrappers/python.cpp, in the class BufData, we added the constructor

    BufData(
        void *ptr       // Raw pointer
        , size_t count  // Number of points
        , size_t dim    // The number of floats inside a point
    )
        : BufData(
            ptr
            , sizeof(float)
            , "@f"
            , 2
            , std::vector<size_t>{ count, dim }
            , std::vector<size_t>{ sizeof(float) * dim, sizeof(float) }
        )
    { }

Finally, we extended the get_vertices and get_texture_coordinates wrappers in the points class to create 2-dimensional buffers instead, like this:

    py::class_<rs2::points> points(m, "points");
    points.def(py::init<>())
        .def(py::init<rs2::frame>())
        .def("get_vertices_EXT", [](rs2::points& self) -> BufData
        {
            return BufData(
                const_cast<rs2::vertex*>(self.get_vertices()) // Raw pointer
                , self.size() // Number of vertices
                , 3           // A vertex has 3 coordinates
            );
        }, py::keep_alive<0, 1>())
        .def("get_texture_coordinates_EXT", [](rs2::points& self) -> BufData
        {
            return BufData(
                const_cast<rs2::texture_coordinate*>(self.get_texture_coordinates()) // Raw pointer
                , self.size() // Number of texture coordinates
                , 2           // A texture coordinate has 2 components
            );
        }, py::keep_alive<0, 1>())
        .def("get_vertices", [](rs2::points& self) -> BufData
        {
            return BufData(
                const_cast<rs2::vertex*>(self.get_vertices()) // Raw pointer to items (an item is a vertex)
                , sizeof(rs2::vertex) // Number of bytes for 3 floats
                , std::string("@fff") // 3 floats
                , self.size()         // Number of vertices
            );
        }, py::keep_alive<0, 1>())
        .def("get_texture_coordinates", [](rs2::points& self) -> BufData
        {
            return BufData(
                const_cast<rs2::texture_coordinate*>(self.get_texture_coordinates())
                , sizeof(rs2::texture_coordinate)
                , std::string("@ff")
                , self.size()
            );
        }, py::keep_alive<0, 1>())
        .def("export_to_ply", &rs2::points::export_to_ply)
        .def("size", &rs2::points::size);

This gave us the desired shape of the numpy arrays and increased performance.
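With the extended bindings, the Python side can assemble the interleaved vertex buffer without any per-point loop. A short usage sketch follows; the _EXT accessors are the ones defined in the wrapper code above, the rest is standard pyrealsense2 usage, and the stream setup is only illustrative:

```
import numpy as np
import pyrealsense2 as rs

pipeline = rs.pipeline()
pipeline.start()
try:
    frames = pipeline.wait_for_frames()
    depth = frames.get_depth_frame()
    points = rs.pointcloud().calculate(depth)

    coordinates = np.asanyarray(points.get_vertices_EXT())         # shape (N, 3), float32
    uvs = np.asanyarray(points.get_texture_coordinates_EXT())      # shape (N, 2), float32

    vertex_array = np.hstack((coordinates, uvs))                   # one (N, 5) array for the vertex buffer
    index_array = np.arange(len(vertex_array), dtype=np.uint32)
finally:
    pipeline.stop()
```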
## Profiling Notes

First one installs snakeviz:

    pip install snakeviz

Then add the Python interpreter options such that the main script is invoked like this:

    python -m cProfile -o stats.prof main.py

Finally, after having run the Python application, write at the terminal:

    /opt/local/Library/Frameworks/Python.framework/Versions/3.5/bin/snakeviz stats.prof

The long path is due to using MacPorts for installing Python.
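If you just want the hot spots without opening the snakeviz browser view, the same stats.prof file can be inspected with the standard library pstats module, for example:

```
import pstats

stats = pstats.Stats("stats.prof")
stats.sort_stats("cumulative").print_stats(10)   # ten most expensive calls by cumulative time
```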
## MotorCom

For this project, we are using a RedBoard to control a chain of motor drivers. The RedBoard is programmed with Arduino and runs a simplified version of C++. We use the serial interface (USB) between the computer and the RedBoard to synchronize the camera capture and the motor positions.

### Prerequisites

```
pip install json
```
```
pip install pyserial
```

In the Arduino IDE package manager, add ArduinoJson.

### Hardware setup

See:

> https://learn.sparkfun.com/tutorials/getting-started-with-the-autodriver---v13?_ga=2.96138906.787908599.1517820663-1889163370.1513463701

### Redboard side

RedboardProgram is the program that will run on the RedBoard.
Arduino programs consist of two parts: setup(), which will only be run once, and loop(), which will run forever. In the setup part, the RedBoard constructs the necessary variables for MAX_BOARDS number of boards. Then it sets up the serial interface and waits for a ready signal from the computer. Then it starts listening for a JSON file on the serial interface.

The JSON file contains the following information:

NUM_BOARDS: the number of boards that are set up in a chain

The boards are then configured and a simple sanity check is performed to verify that the configuration was successful.

One loop iteration goes as follows. The RedBoard reads a string from the serial interface which contains the desired motor positions. Then it increments the motors and sends a signal over the serial connection to tell the client that the object is ready for capture.

### Computer side

Client is the program that runs on the user side.
The user can modify the configuration data that will be sent as a JSON string to the RedBoard in config.txt.

Assuming a correct hardware setup, there are only two functions the user needs to know (a sketch of how they fit together is given below, after the settings.xml notes).

```
ser = setup()
```

This function establishes a connection between the computer and the RedBoard, uploads the configuration file and asserts that the motors are working. ser is the serial interface we use to communicate with the RedBoard.

```
msg = nextPos()
```

Increments the positions of the motors. msg is a string containing the motor positions.

### In settings.xml:

The positions are generated in a separate module. You can specify the module in settings.xml.
Here you have to put in the name of the port the Arduino is connected to. This can be found in Tools -> Port in the Arduino IDE while the RedBoard is connected. It should read something like "/dev/cu.usbserial-DN02Z6PY".
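To make the serial exchange concrete, here is a rough pyserial sketch of one session as described above: the ready signal, the JSON configuration, then a single position/capture round trip. This is not the project's Client code; the port name, baud rate, message framing and any JSON keys other than NUM_BOARDS are assumptions for illustration.

```
import json
import time
import serial

ser = serial.Serial("/dev/cu.usbserial-DN02Z6PY", 9600, timeout=5)  # port as found in the Arduino IDE
time.sleep(2)                                    # give the RedBoard time to reset after the port opens

ser.write(b"ready\n")                            # ready signal the RedBoard waits for in setup()
config = {"NUM_BOARDS": 3}                       # number of boards in the chain
ser.write((json.dumps(config) + "\n").encode())  # configuration, normally read from config.txt

positions = "512,512,512"                        # desired motor positions (format is an assumption)
ser.write((positions + "\n").encode())
reply = ser.readline()                           # RedBoard answers when the object is ready for capture
print("ready for capture:", reply)

ser.close()
```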
## TODO:

How to:
Data acquisition
Camera calibration
Extracting shape data
Ordering and cleaning data
Use data to train model
Evaluate model
Realtime interaction

From 30d385ff6ac36f0d95a1fc53c23b0580aa7a2045 Mon Sep 17 00:00:00 2001
From: Baptiste Manach
Date: Thu, 26 Jul 2018 15:03:43 +0200
Subject: [PATCH 2/2] Reformating function in readme

---
 README.md | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/README.md b/README.md
index faa6ecc..b7c5849 100644
--- a/README.md
+++ b/README.md
@@ -79,27 +79,27 @@ Here is the code that is causing the bad performance
 
     def update(self, coordinates, uvs):
-    vertex_data = []
-    index_data = []
-    index = 0
-    for i in range(len(coordinates)):
-    if fabs(coordinates[i][2]) > 0.0:
-    vertex_data.append(coordinates[i][0])
-    vertex_data.append(coordinates[i][1])
-    vertex_data.append(coordinates[i][2])
-    vertex_data.append(uvs[i][0])
-    vertex_data.append(uvs[i][1])
-    index_data.append(index)
-    index += 1
-    vertex_array = np.array(vertex_data, dtype=np.float32)
-    index_array = np.array(index_data, dtype=np.uint32)
-
-    self.count = index
-
-    glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
-    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
-    glBufferSubData(GL_ARRAY_BUFFER, 0, vertex_array.nbytes, vertex_array)
-    glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, index_array.nbytes, index_array)
+        vertex_data = []
+        index_data = []
+        index = 0
+        for i in range(len(coordinates)):
+            if fabs(coordinates[i][2]) > 0.0:
+                vertex_data.append(coordinates[i][0])
+                vertex_data.append(coordinates[i][1])
+                vertex_data.append(coordinates[i][2])
+                vertex_data.append(uvs[i][0])
+                vertex_data.append(uvs[i][1])
+                index_data.append(index)
+                index += 1
+        vertex_array = np.array(vertex_data, dtype=np.float32)
+        index_array = np.array(index_data, dtype=np.uint32)
+
+        self.count = index
+
+        glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
+        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
+        glBufferSubData(GL_ARRAY_BUFFER, 0, vertex_array.nbytes, vertex_array)
+        glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, index_array.nbytes, index_array)
 
 It is not really the OpenGL calls at the end that are the problem, but rather that we set up a for-loop that incrementally creates two lists in order to zip two numpy arrays into one numpy array. This is slow.
 Ideally we would much rather write something like: