diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..4480b32de4d --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.ipynb filter=jupyter_clear_output diff --git a/.gitfilters b/.gitfilters new file mode 100644 index 00000000000..cfe4595b949 --- /dev/null +++ b/.gitfilters @@ -0,0 +1,4 @@ +[filter "jupyter_clear_output"] + clean = "jupyter nbconvert --stdin --stdout --log-level=ERROR --to notebook --ClearOutputPreprocessor.enabled=True" + smudge = cat + required = true diff --git a/.gitignore b/.gitignore index aacac320fa9..69b751868a4 100644 --- a/.gitignore +++ b/.gitignore @@ -39,6 +39,7 @@ compile_commands.json # Example resources examples/Python/ReconstructionSystem/dataset/ +examples/TestData/fountain_small examples/TestData/Armadillo.ply examples/TestData/Bunny.ply examples/TestData/eagle.ply @@ -61,3 +62,12 @@ examples/Python/Basic/copy_of_lena_color.jpg examples/Python/Basic/copy_of_crate.mtl examples/Python/Basic/copy_of_crate.png examples/Python/Basic/copy_of_crate.obj + +# doc +docs/TestData +docs/tutorial/**/*.ipynb +docs/tutorial/**/*.py +docs/tutorial/**/*.jpg +docs/tutorial/**/*.ply +docs/tutorial/**/*.pcd +docs/tutorial/**/*.json diff --git a/.travis.yml b/.travis.yml index 60a7a189e4a..17e239947ea 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,7 +35,7 @@ matrix: - pip install -U yapf==0.28.0 - make check-style - # Build docs only + # Build headless and docs - os: linux dist: bionic sudo: true @@ -48,6 +48,9 @@ matrix: packages: - cmake - doxygen + - texlive + - texlive-latex-extra + - pandoc - libosmesa6-dev before_install: ./util/scripts/setup-linux.sh script: @@ -55,13 +58,13 @@ matrix: - cd build - which python - python -V - - pip install -U -q sphinx sphinx-rtd-theme - - cmake -DPYTHON_EXECUTABLE=`which python` -DENABLE_HEADLESS_RENDERING=ON -DBUILD_GLEW=ON -DBUILD_GLFW=ON .. 
+ - pip install -U -q sphinx sphinx-rtd-theme nbsphinx Pillow + - cmake -DPYTHON_EXECUTABLE=`which python` -DENABLE_HEADLESS_RENDERING=ON -DBUILD_GLEW=ON -DBUILD_GLFW=ON -DWITH_OPENMP=OFF .. - make install-pip-package -j$NPROC - make -j$NPROC - bin/GLInfo - python -c "from open3d import *; import open3d; print(open3d)" - - ../util/scripts/make-documentation.sh + - travis_wait 40 ../util/scripts/make-documentation.sh # SHARED=ON, BUILD_DEPENDENCY_FROM_SOURCE=OFF diff --git a/docs/_static/theme_overrides.css b/docs/_static/theme_overrides.css index 29efa6f4274..250bb7df602 100644 --- a/docs/_static/theme_overrides.css +++ b/docs/_static/theme_overrides.css @@ -12,3 +12,9 @@ overflow: visible !important; } } + +/* Limit visible text output to about 30 lines and show scrollbar */ +.nboutput .output_area div > pre { + overflow-y: scroll !important; + max-height: 30em; +} diff --git a/docs/builddocs.rst b/docs/builddocs.rst index 7ad005da8ed..561f412a6ae 100644 --- a/docs/builddocs.rst +++ b/docs/builddocs.rst @@ -29,6 +29,9 @@ Clone and build Open3D from source. Visit :ref:`compilation` for details. conda install sphinx sphinx-autobuild +You further need to install `nbsphinx`. See `here <https://nbsphinx.readthedocs.io/en/latest/installation.html>`_ for installation instructions. + + 3.
Install Doxygen `````````````````` diff --git a/docs/conf.py b/docs/conf.py index bd7aef9e710..76c3fac74f8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -24,10 +24,12 @@ import os import re import subprocess +from pathlib import Path +import shutil def get_git_short_hash(): - rc = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']) + rc = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]) rc = rc.decode("utf-8").strip() return rc @@ -39,7 +41,8 @@ def get_git_short_hash(): sys.path.insert( 0, os.path.join(current_file_dir, "..", "build", "lib", "python_package", - "open3d")) + "open3d"), +) # -- General configuration ------------------------------------------------ @@ -51,26 +54,33 @@ def get_git_short_hash(): # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.mathjax', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', - 'sphinx.ext.napoleon' + "sphinx.ext.mathjax", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.napoleon", + "nbsphinx", ] +# Allow for more time for notebook cell evaluation +nbsphinx_timeout = 6000 +# nbsphinx_allow_errors = True + # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
-project = u'Open3D' -copyright = u'2018 - 2019, www.open3d.org' -author = u'www.open3d.org' +project = u"Open3D" +copyright = u"2018 - 2020, www.open3d.org" +author = u"www.open3d.org" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -93,10 +103,10 @@ def get_git_short_hash(): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -127,13 +137,13 @@ def get_git_short_hash(): # '_static' contains the theme overwrite static_path = os.path.join(theme_path, "sphinx_rtd_theme", "static") -html_static_path = [static_path, '_static'] +html_static_path = [static_path, "_static"] # Force table wrap: https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html html_context = { - 'css_files': [ - '_static/theme_overrides.css', # override wide tables in RTD theme - ], + "css_files": [ + "_static/theme_overrides.css" # override wide tables in RTD theme + ] } # added by Jaesik to hide "View page source" @@ -142,7 +152,7 @@ def get_git_short_hash(): # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'Open3Ddoc' +htmlhelp_basename = "Open3Ddoc" # -- Options for LaTeX output --------------------------------------------- @@ -150,15 +160,12 @@ def get_git_short_hash(): # The paper size ('letterpaper' or 'a4paper'). 
# # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -167,37 +174,45 @@ def get_git_short_hash(): # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'Open3D.tex', u'Open3D Documentation', u'Qianyi Zhou', - 'manual'), -] +latex_documents = [( + master_doc, + "Open3D.tex", + u"Open3D Documentation", + u"Qianyi Zhou", + "manual", +)] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, 'open3d', u'Open3D Documentation', [author], 1)] +man_pages = [(master_doc, "open3d", u"Open3D Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'Open3D', u'Open3D Documentation', author, 'Open3D', - 'One line description of project.', 'Miscellaneous'), -] +texinfo_documents = [( + master_doc, + "Open3D", + u"Open3D Documentation", + author, + "Open3D", + "One line description of project.", + "Miscellaneous", +)] # Version 0: Added by Jaesik to list Python members using the source order # Version 1: Changed to 'groupwise': __init__ first, then methods, then # properties. Within each, sorted alphabetically. 
-autodoc_member_order = 'groupwise' +autodoc_member_order = "groupwise" def is_enum_class(func, func_name): def import_from_str(class_name): - components = class_name.split('.') + components = class_name.split(".") mod = __import__(components[0]) for comp in components[1:]: mod = getattr(mod, comp) diff --git a/docs/contribute/contribution_recipes.rst b/docs/contribute/contribution_recipes.rst index f2d1b2df722..a71f54916f0 100644 --- a/docs/contribute/contribution_recipes.rst +++ b/docs/contribute/contribution_recipes.rst @@ -268,6 +268,15 @@ Case 4: When adding a Python tutorial * Update the `index.rst` file to include your new tutorial +.. note:: + When you commit an ipynb notebook file make sure to remove the output cells + to keep the commit sizes small. + You can use the script ``examples/Python/jupyter_strip_output.sh`` for + stripping the output cells of all tutorials. + Alternatively, you can enable a pre-commit filter by running the script + ``util/scripts/git_enable_ipynb_filter.sh`` once. In any case you need to + have ``jupyter`` in your ``PATH`` and ``nbconvert`` installed. + Dos ----------------------- diff --git a/docs/getting_started.rst b/docs/getting_started.rst index edd1395e882..cec705bf30b 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -74,29 +74,7 @@ Running Open3D tutorials ======================== A complete set of Python tutorials and testing data will also be copied to
image:: _static/Basic/rgbd_images/redwood_pcd.png - :width: 400px - -The Python code is quite straightforward, and the detailed explanation can be -found in :ref:`rgbd_redwood`. -You may further experiment with different tutorials or check out the complete -tutorial list in the :ref:`tutorial_index` page. +demonstrate the usage of Open3D Python interface. See ``examples/Python`` for all Python examples. .. note:: Open3D's Python tutorial utilizes some external packages: ``numpy``, ``matplotlib``, ``opencv-python``. OpenCV is only used for reconstruction diff --git a/docs/make_docs.py b/docs/make_docs.py index 308e88ae9d9..a29aaa86c0b 100644 --- a/docs/make_docs.py +++ b/docs/make_docs.py @@ -42,6 +42,9 @@ import weakref from tempfile import mkdtemp import re +from pathlib import Path +import nbformat +import nbconvert def _create_or_clear_dir(dir_path): @@ -81,6 +84,7 @@ def generate_rst(self): def _get_open3d_module(full_module_name): """Returns the module object for the given module path""" import open3d # make sure the root module is loaded + try: # try to import directly. This will work for pure python submodules module = importlib.import_module(full_module_name) @@ -91,7 +95,7 @@ def _get_open3d_module(full_module_name): # define a specific module path (e.g. the modules defined with # pybind). 
current_module = open3d - for sub_module_name in full_module_name.split('.')[1:]: + for sub_module_name in full_module_name.split(".")[1:]: current_module = getattr(current_module, sub_module_name) return current_module @@ -126,12 +130,8 @@ def _generate_class_doc(sub_module_full_name, class_name, output_path): f.write(out_string) @staticmethod - def _generate_sub_module_doc( - sub_module_full_name, - class_names, - function_names, - sub_module_doc_path, - ): + def _generate_sub_module_doc(sub_module_full_name, class_names, + function_names, sub_module_doc_path): # print("Generating docs: %s" % (sub_module_doc_path,)) class_names = sorted(class_names) function_names = sorted(function_names) @@ -200,9 +200,12 @@ def _generate_sub_module_class_function_docs(sub_module_full_name, # Submodule docs sub_module_doc_path = os.path.join(output_dir, sub_module_full_name + ".rst") - PyAPIDocsBuilder._generate_sub_module_doc(sub_module_full_name, - class_names, function_names, - sub_module_doc_path) + PyAPIDocsBuilder._generate_sub_module_doc( + sub_module_full_name, + class_names, + function_names, + sub_module_doc_path, + ) class SphinxDocsBuilder: @@ -216,7 +219,6 @@ class SphinxDocsBuilder: """ def __init__(self, html_output_dir, is_release): - # Get the modules for which we want to build the documentation. # We use the modules listed in the index.rst file here. 
self.documented_modules = self._get_module_names_from_index_rst() @@ -231,9 +233,9 @@ def __init__(self, html_output_dir, is_release): def _get_module_names_from_index_rst(): """Reads the modules of the python api from the index.rst""" module_names = [] - with open('index.rst', 'r') as f: + with open("index.rst", "r") as f: for line in f: - m = re.match('^\s*python_api/(.*)\s*$', line) + m = re.match("^\s*python_api/(.*)\s*$", line) if m: module_names.append(m.group(1)) return module_names @@ -261,10 +263,10 @@ def _run_sphinx(self): if self.is_release: version_list = [ - line.rstrip('\n').split(' ')[1] - for line in open('../src/Open3D/version.txt') + line.rstrip("\n").split(" ")[1] + for line in open("../src/Open3D/version.txt") ] - release_version = '.'.join(version_list[:3]) + release_version = ".".join(version_list[:3]) print("Building docs for release:", release_version) cmd = [ @@ -306,30 +308,140 @@ def run(self): cmd = ["doxygen", "Doxyfile"] print('Calling: "%s"' % " ".join(cmd)) subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr) - shutil.copytree(os.path.join("doxygen", "html"), - os.path.join(self.html_output_dir, "html", "cpp_api")) + shutil.copytree( + os.path.join("doxygen", "html"), + os.path.join(self.html_output_dir, "html", "cpp_api"), + ) if os.path.exists(doxygen_temp_dir): shutil.rmtree(doxygen_temp_dir) +class JupyterDocsBuilder: + + def __init__(self, current_file_dir, clean_notebooks, execute_notebooks): + """ + execute_notebooks is one of {"auto", "always", "never"} + """ + if execute_notebooks not in {"auto", "always", "never"}: + raise ValueError(f"Invalid execute option: {execute_notebooks}.") + self.clean_notebooks = clean_notebooks + self.execute_notebooks = execute_notebooks + self.current_file_dir = current_file_dir + print(f"Notebook execution mode: {self.execute_notebooks}") + + def run(self): + # Copy TestData directory to the tutorial folder + test_data_in_dir = (Path(self.current_file_dir).parent / "examples" / + 
"TestData") + test_data_out_dir = Path(self.current_file_dir) / "TestData" + if test_data_out_dir.exists(): + shutil.rmtree(test_data_out_dir) + shutil.copytree(test_data_in_dir, test_data_out_dir) + + # Copy and execute notebooks in the tutorial folder + nb_paths = [] + example_dirs = ["Basic", "Advanced"] + for example_dir in example_dirs: + in_dir = (Path(self.current_file_dir).parent / "examples" / + "Python" / example_dir) + out_dir = Path(self.current_file_dir) / "tutorial" / example_dir + shutil.copy( + in_dir.parent / "open3d_tutorial.py", + out_dir.parent / "open3d_tutorial.py", + ) + + if self.clean_notebooks: + for nb_out_path in out_dir.glob("*.ipynb"): + print(f"Delete: {nb_out_path}") + nb_out_path.unlink() + + for nb_in_path in in_dir.glob("*.ipynb"): + nb_out_path = out_dir / nb_in_path.name + if not nb_out_path.is_file(): + print(f"Copy: {nb_in_path}\n -> {nb_out_path}") + shutil.copy(nb_in_path, nb_out_path) + else: + print(f"Copy skipped: {nb_out_path}") + nb_paths.append(nb_out_path) + + # Execute Jupyter notebooks + for nb_path in nb_paths: + print(f"[Processing notebook {nb_path.name}]") + with open(nb_path) as f: + nb = nbformat.read(f, as_version=4) + + # https://github.com/spatialaudio/nbsphinx/blob/master/src/nbsphinx.py + has_code = any(c.source for c in nb.cells if c.cell_type == "code") + has_output = any( + c.get("outputs") or c.get("execution_count") + for c in nb.cells + if c.cell_type == "code") + execute = (self.execute_notebooks == "auto" and has_code and + not has_output) or self.execute_notebooks == "always" + print( + f"has_code: {has_code}, has_output: {has_output}, execute: {execute}" + ) + + if execute: + ep = nbconvert.preprocessors.ExecutePreprocessor(timeout=6000) + try: + ep.preprocess(nb, {"metadata": {"path": nb_path.parent}}) + except nbconvert.preprocessors.execute.CellExecutionError: + print(f"Execution of {nb_path.name} failed") + + with open(nb_path, "w", encoding="utf-8") as f: + nbformat.write(nb, f) + + if 
__name__ == "__main__": + """ + # Clean existing notebooks in docs/tutorial, execute notebooks if the + # notebook does not have outputs, and build docs for Python and C++. + $ python make_docs.py --clean_notebooks --execute_notebooks=auto --sphinx --doxygen + + # Build docs for Python (--sphinx) and C++ (--doxygen). + $ python make_docs.py --execute_notebooks=auto --sphinx --doxygen + + # Build docs for release (version number will be used instead of git hash). + $ python make_docs.py --is_release --sphinx --doxygen + """ parser = argparse.ArgumentParser() - parser.add_argument("--sphinx", - dest="build_sphinx", - action="store_true", - default=False, - help="Build Sphinx for main docs and Python API docs.") - parser.add_argument("--doxygen", - dest="build_doxygen", - action="store_true", - default=False, - help="Build Doxygen for C++ API docs.") - parser.add_argument("--is_release", - dest="is_release", - action="store_true", - default=False, - help="Show Open3D version number rather than git hash.") + parser.add_argument( + "--clean_notebooks", + dest="clean_notebooks", + action="store_true", + default=False, + help=("Whether to clean existing notebooks in docs/tutorial. 
" + "Notebooks are copied from examples/Python to docs/tutorial."), + ) + parser.add_argument( + "--execute_notebooks", + dest="execute_notebooks", + default="auto", + help="Jupyter notebook execution mode, one of {auto, always, never}.", + ) + parser.add_argument( + "--sphinx", + dest="build_sphinx", + action="store_true", + default=False, + help="Build Sphinx for main docs and Python API docs.", + ) + parser.add_argument( + "--doxygen", + dest="build_doxygen", + action="store_true", + default=False, + help="Build Doxygen for C++ API docs.", + ) + parser.add_argument( + "--is_release", + dest="is_release", + action="store_true", + default=False, + help="Show Open3D version number rather than git hash.", + ) args = parser.parse_args() pwd = os.path.dirname(os.path.realpath(__file__)) @@ -348,6 +460,11 @@ def run(self): # To customize build, run sphinx-build manually if args.build_sphinx: print("Sphinx build enabled") + print("Building Jupyter docs") + jdb = JupyterDocsBuilder(pwd, args.clean_notebooks, + args.execute_notebooks) + jdb.run() + print("Building Sphinx docs") sdb = SphinxDocsBuilder(html_output_dir, args.is_release) sdb.run() else: diff --git a/docs/tutorial/Advanced/color_map_optimization.rst b/docs/tutorial/Advanced/color_map_optimization.rst deleted file mode 100644 index 620a5a62144..00000000000 --- a/docs/tutorial/Advanced/color_map_optimization.rst +++ /dev/null @@ -1,112 +0,0 @@ -.. _color_map_optimization: - -Color Map Optimization -------------------------------------- - -Consider color mapping to the geometry reconstructed from depth cameras. As color and depth frames are not perfectly aligned, the texture mapping using color images is subject to results in blurred color map. Open3D provides color map optimization method proposed by [Zhou2014]_. Before begin, download fountain dataset from `here `_. The following script shows an example of color map optimization. - -.. 
literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - - -Input -```````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py - :language: python - :lineno-start: 19 - :lines: 19-38 - :linenos: - -This script reads color and depth image pairs and makes ``rgbd_image``. Note that ``convert_rgb_to_intensity`` flag is ``False``. This is to preserve 8-bit color channels instead of using single channel float type image. - -It is always good practice to visualize RGBD image before applying it to color map optimization. ``debug_mode`` switch is for visualizing RGBD image. - -.. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py - :language: python - :lineno-start: 40 - :lines: 40-44 - :linenos: - -The script reads camera trajectory and mesh. - -.. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py - :language: python - :lineno-start: 46 - :lines: 46-53 - :linenos: - -To visualize how the camera poses are not good for color mapping, this script intentionally set the iteration number as 0, which means no optimization. ``color_map_optimization`` paints a mesh using corresponding RGBD images and camera poses. Without optimization, the texture map is blurred. - -.. image:: ../../_static/Advanced/color_map_optimization/initial.png - :width: 300px - -.. image:: ../../_static/Advanced/color_map_optimization/initial_zoom.png - :width: 300px - -Rigid Optimization -``````````````````````````````` - -The next step is to optimize camera poses to get a sharp color map. - -.. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py - :language: python - :lineno-start: 55 - :lines: 55-65 - :linenos: - -The script sets ``maximum_iteration = 300`` for actual iterations. The optimization displays the following energy profile. - -.. 
code-block:: shell - - [ColorMapOptimization] :: Rigid Optimization - [Iteration 0001] Residual error : 21639.276499 (avg : 0.004615) - [Iteration 0002] Residual error : 21461.765357 (avg : 0.004577) - [Iteration 0003] Residual error : 21284.579715 (avg : 0.004540) - : - [Iteration 0298] Residual error : 8891.042884 (avg : 0.001903) - [Iteration 0299] Residual error : 8890.037077 (avg : 0.001903) - [Iteration 0300] Residual error : 8888.970765 (avg : 0.001903) - -Residual error implies inconsistency of image intensities. Lower residual leads better color map quality. By default, ``ColorMapOptimizationOption`` enables rigid optimization. It optimizes 6-dimentional pose of every cameras. - -.. image:: ../../_static/Advanced/color_map_optimization/rigid.png - :width: 300px - -.. image:: ../../_static/Advanced/color_map_optimization/rigid_zoom.png - :width: 300px - -Non-rigid Optimization -``````````````````````````````````` - -For better alignment quality, there is an option for non-rigid optimization. To enable, simply add - -.. code-block:: python - - option.non_rigid_camera_coordinate = True - -before calling ``color_map_optimization``. Besides 6-dimentional camera poses, non-rigid optimization even consider local image warping represented by anchor points. This adds even more flexibility and leads higher quality color mapping. The residual error is smaller than the case of rigid optimization. - -.. code-block:: shell - - [ColorMapOptimization] :: Non-Rigid Optimization - [Iteration 0001] Residual error : 21639.276499, reg : 0.000000 - [Iteration 0002] Residual error : 21187.225206, reg : 13.918495 - [Iteration 0003] Residual error : 20745.248996, reg : 42.234724 - : - [Iteration 0298] Residual error : 5589.018747, reg : 2745.364742 - [Iteration 0299] Residual error : 5587.180145, reg : 2746.619137 - [Iteration 0300] Residual error : 5585.066255, reg : 2747.902979 - -Results of non-rigid optimization follow. - -.. 
image:: ../../_static/Advanced/color_map_optimization/non_rigid.png - :width: 300px - -.. image:: ../../_static/Advanced/color_map_optimization/non_rigid_zoom.png - :width: 300px - -.. note:: If the residual error does not stably decrease, it is mainly because images are being bended abruptly. In this case, consider making iteration more conservative by increasing ``option.non_rigid_anchor_point_weight``. diff --git a/docs/tutorial/Advanced/colored_pointcloud_registration.rst b/docs/tutorial/Advanced/colored_pointcloud_registration.rst deleted file mode 100644 index 76fef88a8f3..00000000000 --- a/docs/tutorial/Advanced/colored_pointcloud_registration.rst +++ /dev/null @@ -1,104 +0,0 @@ -.. _colored_point_registration: - -Colored point cloud registration -------------------------------------- - -This tutorial demonstrates an ICP variant that uses both geometry and color for registration. It implements the algorithm of [Park2017]_. The color information locks the alignment along the tangent plane. Thus this algorithm is more accurate and more robust than prior point cloud registration algorithms, while the running speed is comparable to that of ICP registration. This tutorial uses notations from :ref:`icp_registration`. - -.. literalinclude:: ../../../examples/Python/Advanced/colored_pointcloud_registration.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -.. _visualize_color_alignment: - -Helper visualization function -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/colored_pointcloud_registration.py - :language: python - :lineno-start: 12 - :lines: 12-15 - :linenos: - -In order to demonstrate the alignment between colored point clouds, ``draw_registration_result_original_color`` renders point clouds with their original color. - -Input -``````````````` - -.. 
literalinclude:: ../../../examples/Python/Advanced/colored_pointcloud_registration.py - :language: python - :lineno-start: 20 - :lines: 20-27 - :linenos: - -This script reads a source point cloud and a target point cloud from two files. An identity matrix is used as initialization. - -.. image:: ../../_static/Advanced/colored_pointcloud_registration/initial.png - :width: 325px - -.. image:: ../../_static/Advanced/colored_pointcloud_registration/initial_side.png - :width: 325px - - -.. _geometric_alignment: - -Point-to-plane ICP -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/colored_pointcloud_registration.py - :language: python - :lineno-start: 29 - :lines: 29-38 - :linenos: - -We first run :ref:`point_to_plane_icp` as a baseline approach. The visualization below shows misaligned green triangle textures. This is because geometric constraint does not prevent two planar surfaces from slipping. - -.. image:: ../../_static/Advanced/colored_pointcloud_registration/point_to_plane.png - :width: 325px - -.. image:: ../../_static/Advanced/colored_pointcloud_registration/point_to_plane_side.png - :width: 325px - - -.. _multi_scale_geometric_color_alignment: - -Colored point cloud registration -`````````````````````````````````````````````` - -The core function for colored point cloud registration is ``registration_colored_icp``. Following [Park2017]_, it runs ICP iterations (see :ref:`point_to_point_icp` for details) with a joint optimization objective - -.. math:: E(\mathbf{T}) = (1-\delta)E_{C}(\mathbf{T}) + \delta E_{G}(\mathbf{T}), - -where :math:`\mathbf{T}` is the transformation matrix to be estimated. :math:`E_{C}` and :math:`E_{G}` are the photometric and geometric terms, respectively. :math:`\delta\in[0,1]` is a weight parameter that has been determined empirically. - -The geometric term :math:`E_{G}` is the same as the :ref:`point_to_plane_icp` objective - -.. 
math:: E_{G}(\mathbf{T}) = \sum_{(\mathbf{p},\mathbf{q})\in\mathcal{K}}\big((\mathbf{p} - \mathbf{T}\mathbf{q})\cdot\mathbf{n}_{\mathbf{p}}\big)^{2}, - -where :math:`\mathcal{K}` is the correspondence set in the current iteration. :math:`\mathbf{n}_{\mathbf{p}}` is the normal of point :math:`\mathbf{p}`. - -The color term :math:`E_{C}` measures the difference between the color of point :math:`\mathbf{q}` (denoted as :math:`C(\mathbf{q})`) and the color of its projection on the tangent plane of :math:`\mathbf{p}`. - -.. math:: E_{C}(\mathbf{T}) = \sum_{(\mathbf{p},\mathbf{q})\in\mathcal{K}}\big(C_{\mathbf{p}}(\mathbf{f}(\mathbf{T}\mathbf{q})) - C(\mathbf{q})\big)^{2}, - -where :math:`C_{\mathbf{p}}(\cdot)` is a precomputed function continuously defined on the tangent plane of :math:`\mathbf{p}`. Function :math:`\mathbf{f}(\cdot)` projects a 3D point to the tangent plane. More details refer to [Park2017]_. - -To further improve efficiency, [Park2017]_ proposes a multi-scale registration scheme. This has been implemented in the following script. - -.. literalinclude:: ../../../examples/Python/Advanced/colored_pointcloud_registration.py - :language: python - :lineno-start: 40 - :lines: 40-72 - :linenos: - -In total, 3 layers of multi-resolution point clouds are created with :ref:`voxel_downsampling`. Normals are computed with :ref:`vertex_normal_estimation`. The core registration function ``registration_colored_icp`` is called for each layer, from coarse to fine. ``lambda_geometric`` is an optional argument for ``registration_colored_icp`` that determines :math:`\lambda \in [0,1]` in the overall energy :math:`\lambda E_{G} + (1-\lambda) E_{C}`. - -The output is a tight alignment of the two point clouds. Notice the green triangles on the wall. - -.. image:: ../../_static/Advanced/colored_pointcloud_registration/colored.png - :width: 325px - -.. 
image:: ../../_static/Advanced/colored_pointcloud_registration/colored_side.png - :width: 325px diff --git a/docs/tutorial/Advanced/customized_visualization.rst b/docs/tutorial/Advanced/customized_visualization.rst index 0b8a19dd2ed..4a7413b3e79 100644 --- a/docs/tutorial/Advanced/customized_visualization.rst +++ b/docs/tutorial/Advanced/customized_visualization.rst @@ -3,7 +3,7 @@ Customized visualization ------------------------------------- -The usage of Open3D convenient visualization functions ``draw_geometries`` and ``draw_geometries_with_custom_animation`` is straightforward. Everything can be done with the GUI. Press :kbd:`h` inside the visualizer window to see helper information. Details see :ref:`visualization`. +The usage of Open3D convenient visualization functions ``draw_geometries`` and ``draw_geometries_with_custom_animation`` is straightforward. Everything can be done with the GUI. Press :kbd:`h` inside the visualizer window to see helper information. Details see :any:`/tutorial/Basic/visualization.ipynb`. This tutorial focuses on more advanced functionalities to customize the behavior of the visualizer window. Please refer examples/Python/Advanced/customized_visualization.py to try the following examples. diff --git a/docs/tutorial/Advanced/fast_global_registration.rst b/docs/tutorial/Advanced/fast_global_registration.rst deleted file mode 100644 index e3277c50d4b..00000000000 --- a/docs/tutorial/Advanced/fast_global_registration.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. _fast_global_registration: - -Fast global registration -------------------------------------- - -The RANSAC based :ref:`global_registration` solution may take a long time due to countless model proposals and evaluations. -[Zhou2016]_ introduced a faster approach that quickly optimizes line process weights of few correspondences. -As there is no model proposal and evaluation involved for each iteration, the approach proposed in [Zhou2016]_ can save a lot of computational time. 
- -This script compares the running time of RANSAC based :ref:`global_registration` and implementation of [Zhou2016]_. - -.. literalinclude:: ../../../examples/Python/Advanced/fast_global_registration.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -Input -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/fast_global_registration.py - :language: python - :lineno-start: 29 - :lines: 29-31 - :linenos: - -For the pair comparison, the script reuses the ``prepare_dataset`` function defined in :ref:`global_registration`. -It produces a pair of downsampled point clouds as well as FPFH features. - -Baseline -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/fast_global_registration.py - :language: python - :lineno-start: 33 - :lines: 33-40 - :linenos: - -This script calls RANSAC based :ref:`global_registration` as a baseline. After registration it displays the following result. - -.. image:: ../../_static/Advanced/fast_global_registration/ransac.png - :width: 400px - -.. code-block:: shell - - RANSAC based global registration took 2.538 sec. - -Fast global registration -`````````````````````````````````````` - -With the same input used for a baseline, the next script calls the implementation of [Zhou2016]_. - -.. literalinclude:: ../../../examples/Python/Advanced/fast_global_registration.py - :language: python - :lineno-start: 15 - :lines: 15-24 - :linenos: - -This script displays the following result. - -.. image:: ../../_static/Advanced/fast_global_registration/fgr.png - :width: 400px - -.. code-block:: shell - - Fast global registration took 0.193 sec. - -With proper configuration, the accuracy of fast global registration is even comparable with ICP. -Please refer to [Zhou2016]_ for more experimental results. 
diff --git a/docs/tutorial/Advanced/global_registration.rst b/docs/tutorial/Advanced/global_registration.rst deleted file mode 100644 index c11eeb0cb6c..00000000000 --- a/docs/tutorial/Advanced/global_registration.rst +++ /dev/null @@ -1,85 +0,0 @@ -.. _global_registration: - -Global registration -------------------------------------- - -Both :ref:`icp_registration` and :ref:`colored_point_registration` are known as **local** registration methods because they rely on a rough alignment as initialization. This tutorial shows another class of registration methods, known as **global** registration. This family of algorithms do not require an alignment for initialization. They usually produce less tight alignment results and are used as initialization of the local methods. - -.. literalinclude:: ../../../examples/Python/Advanced/global_registration.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -Input -```````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/global_registration.py - :language: python - :lineno-start: 39 - :lines: 39-50 - :linenos: - -This script reads a source point cloud and a target point cloud from two files. They are misaligned with an identity matrix as transformation. - -.. image:: ../../_static/Advanced/global_registration/initial.png - :width: 400px - -.. _extract_geometric_feature: - -Extract geometric feature -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/global_registration.py - :language: python - :lineno-start: 21 - :lines: 21-36 - :linenos: - -We down sample the point cloud, estimate normals, then compute a FPFH feature for each point. The FPFH feature is a 33-dimensional vector that describes the local geometric property of a point. A nearest neighbor query in the 33-dimensinal space can return points with similar local geometric structures. See [Rasu2009]_ for details. - -.. 
_feature_matching: - -RANSAC -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/global_registration.py - :language: python - :lineno-start: 53 - :lines: 52-65 - :linenos: - -We use RANSAC for global registration. In each RANSAC iteration, ``ransac_n`` random points are picked from the source point cloud. Their corresponding points in the target point cloud are detected by querying the nearest neighbor in the 33-dimensional FPFH feature space. A pruning step takes fast pruning algorithms to quickly reject false matches early. - -Open3D provides the following pruning algorithms: - -- ``CorrespondenceCheckerBasedOnDistance`` checks if aligned point clouds are close (less than specified threshold). -- ``CorrespondenceCheckerBasedOnEdgeLength`` checks if the lengths of any two arbitrary edges (line formed by two vertices) individually drawn from source and target correspondences are similar. This tutorial checks that :math:`||edge_{source}|| > 0.9 \times ||edge_{target}||` and :math:`||edge_{target}|| > 0.9 \times ||edge_{source}||` are true. -- ``CorrespondenceCheckerBasedOnNormal`` considers vertex normal affinity of any correspondences. It computes dot product of two normal vectors. It takes radian value for the threshold. - -Only matches that pass the pruning step are used to compute a transformation, which is validated on the entire point cloud. The core function is ``registration_ransac_based_on_feature_matching``. The most important hyperparameter of this function is ``RANSACConvergenceCriteria``. It defines the maximum number of RANSAC iterations and the maximum number of validation steps. The larger these two numbers are, the more accurate the result is, but also the more time the algorithm takes. - -We set the RANSAC parameters based on the empirical value provided by [Choi2015]_. The result is - -.. image:: ../../_static/Advanced/global_registration/ransac.png - :width: 400px - -.. 
Note:: Open3D provides faster implementation for global registration. Please refer :ref:`fast_global_registration`. - -.. _local_refinement: - -Local refinement -`````````````````````````````````````` - -For performance reason, the global registration is only performed on a heavily down-sampled point cloud. The result is also not tight. We use :ref:`point_to_plane_icp` to further refine the alignment. - -.. literalinclude:: ../../../examples/Python/Advanced/global_registration.py - :language: python - :lineno-start: 69 - :lines: 68-76 - :linenos: - -Outputs a tight alignment. This summarizes a complete pairwise registration workflow. - -.. image:: ../../_static/Advanced/global_registration/icp.png - :width: 400px diff --git a/docs/tutorial/Advanced/index.rst b/docs/tutorial/Advanced/index.rst index 1e57b7c7ee0..de39b359ef9 100644 --- a/docs/tutorial/Advanced/index.rst +++ b/docs/tutorial/Advanced/index.rst @@ -6,10 +6,12 @@ Advanced pointcloud_outlier_removal colored_pointcloud_registration global_registration - fast_global_registration multiway_registration rgbd_integration + voxelization color_map_optimization + mesh_deformation + surface_reconstruction customized_visualization non_blocking_visualization interactive_visualization diff --git a/docs/tutorial/Advanced/multiway_registration.rst b/docs/tutorial/Advanced/multiway_registration.rst deleted file mode 100644 index ce8e34a1028..00000000000 --- a/docs/tutorial/Advanced/multiway_registration.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. _multiway_registration: - -Multiway registration -------------------------------------- - -Multiway registration is the process to align multiple pieces of geometry in a global space. Typically, the input is a set of geometries (e.g., point clouds or RGBD images) :math:`\{\mathbf{P}_{i}\}`. The output is a set of rigid transformations :math:`\{\mathbf{T}_{i}\}`, so that the transformed point clouds :math:`\{\mathbf{T}_{i}\mathbf{P}_{i}\}` are aligned in the global space. 
- -Open3D implements multiway registration via pose graph optimization. The backend implements the technique presented in [Choi2015]_. - -.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -Input -```````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py - :language: python - :lineno-start: 15 - :lines: 15-21 - :linenos: - -The first part of the tutorial script reads three point clouds from files. The point clouds are downsampled and visualized together. They are misaligned. - -.. image:: ../../_static/Advanced/multiway_registration/initial.png - :width: 400px - -.. _build_a_posegraph: - -.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py - :language: python - :lineno-start: 24 - :lines: 24-68 - :linenos: - -A pose graph has two key elements: nodes and edges. A node is a piece of geometry :math:`\mathbf{P}_{i}` associated with a pose matrix :math:`\mathbf{T}_{i}` which transforms :math:`\mathbf{P}_{i}` into the global space. The set :math:`\{\mathbf{T}_{i}\}` are the unknown variables to be optimized. ``PoseGraph.nodes`` is a list of ``PoseGraphNode``. We set the global space to be the space of :math:`\mathbf{P}_{0}`. Thus :math:`\mathbf{T}_{0}` is identity matrix. The other pose matrices are initialized by accumulating transformation between neighboring nodes. The neighboring nodes usually have large overlap and can be registered with :ref:`point_to_plane_icp`. - -A pose graph edge connects two nodes (pieces of geometry) that overlap. Each edge contains a transformation matrix :math:`\mathbf{T}_{i,j}` that aligns the source geometry :math:`\mathbf{P}_{i}` to the target geometry :math:`\mathbf{P}_{j}`. This tutorial uses :ref:`point_to_plane_icp` to estimate the transformation. In more complicated cases, this pairwise registration problem should be solved via :ref:`global_registration`. 
- -[Choi2015]_ has observed that pairwise registration is error-prone. False pairwise alignments can outnumber correctly -aligned pairs. Thus, they partition pose graph edges into two classes. **Odometry edges** connect temporally close, neighboring nodes. A local registration algorithm such as ICP can reliably align them. **Loop closure edges** connect any non-neighboring nodes. The alignment is found by global registration and is less reliable. In Open3D, these two classes of edges are distinguished by the ``uncertain`` parameter in the initializer of ``PoseGraphEdge``. - -In addition to the transformation matrix :math:`\mathbf{T}_{i}`, the user can set an information matrix :math:`\mathbf{\Lambda}_{i}` for each edge. If :math:`\mathbf{\Lambda}_{i}` is set using function ``get_information_matrix_from_point_clouds``, the loss on this pose graph edge approximates the RMSE of the corresponding sets between the two nodes, with a line process weight. Refer to Eq (3) to (9) in [Choi2015]_ and `the Redwood registration benchmark `_ for details. - -The script creates a pose graph with three nodes and three edges. Among the edges, two of them are odometry edges (``uncertain = False``) and one is a loop closure edge (``uncertain = True``). - -.. _optimize_a_posegraph: - -.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py - :language: python - :lineno-start: 82 - :lines: 82-89 - :linenos: - -Open3D uses function ``global_optimization`` to perform pose graph optimization. Two types of optimization methods can be chosen: ``GlobalOptimizationGaussNewton`` or ``GlobalOptimizationLevenbergMarquardt``. The latter is recommended since it has better convergence property. Class ``GlobalOptimizationConvergenceCriteria`` can be used to set the maximum number of iterations and various optimization parameters. - -Class ``GlobalOptimizationOption`` defines a couple of options. ``max_correspondence_distance`` decides the correspondence threshold. 
``edge_prune_threshold`` is a threshold for pruning outlier edges. ``reference_node`` is the node id that is considered to be the global space. - -.. code-block:: sh - - Optimizing PoseGraph ... - [GlobalOptimizationLM] Optimizing PoseGraph having 3 nodes and 3 edges. - Line process weight : 3.745800 - [Initial ] residual : 6.741225e+00, lambda : 6.042803e-01 - [Iteration 00] residual : 1.791471e+00, valid edges : 3, time : 0.000 sec. - [Iteration 01] residual : 5.133682e-01, valid edges : 3, time : 0.000 sec. - [Iteration 02] residual : 4.412544e-01, valid edges : 3, time : 0.000 sec. - [Iteration 03] residual : 4.408356e-01, valid edges : 3, time : 0.000 sec. - [Iteration 04] residual : 4.408342e-01, valid edges : 3, time : 0.000 sec. - Delta.norm() < 1.000000e-06 * (x.norm() + 1.000000e-06) - [GlobalOptimizationLM] total time : 0.000 sec. - [GlobalOptimizationLM] Optimizing PoseGraph having 3 nodes and 3 edges. - Line process weight : 3.745800 - [Initial ] residual : 4.408342e-01, lambda : 6.064910e-01 - Delta.norm() < 1.000000e-06 * (x.norm() + 1.000000e-06) - [GlobalOptimizationLM] total time : 0.000 sec. - CompensateReferencePoseGraphNode : reference : 0 - -The global optimization performs twice on the pose graph. The first pass optimizes poses for the original pose graph taking all edges into account and does its best to distinguish false alignments among uncertain edges. These false alignments have small line process weights, and they are pruned after the first pass. The second pass runs without them and produces a tight global alignment. In this example, all the edges are considered as true alignments, hence the second pass terminates immediately. - -.. _visualize_optimization: - -Visualize optimization -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py - :language: python - :lineno-start: 91 - :lines: 91-95 - :linenos: - -Ouputs: - -.. 
image:: ../../_static/Advanced/multiway_registration/optimized.png - :width: 400px - -The transformed point clouds are listed and visualized using ``draw_geometries``. - -.. _make_a_combined_point_cloud: - -Make a combined point cloud -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py - :language: python - :lineno-start: 97 - :lines: 97-105 - :linenos: - -.. image:: ../../_static/Advanced/multiway_registration/combined.png - :width: 400px - -``PointCloud`` has convenient operator ``+`` that can merge two point clouds into single one. -After merging, the points are uniformly resampled using ``voxel_down_sample``. -This is recommended post-processing after merging point cloud since this can relieve duplicating or over-densified points. - -.. note:: Although this tutorial demonstrates multiway registration for point clouds. The same procedure can be applied to RGBD images. See :ref:`reconstruction_system_make_fragments` for an example. diff --git a/docs/tutorial/Advanced/pointcloud_outlier_removal.rst b/docs/tutorial/Advanced/pointcloud_outlier_removal.rst deleted file mode 100644 index 99ebc75fc1a..00000000000 --- a/docs/tutorial/Advanced/pointcloud_outlier_removal.rst +++ /dev/null @@ -1,86 +0,0 @@ -.. _pointclud_outlier_removal: - -Point cloud outlier removal -------------------------------------- - -When collecting data from scanning devices, it happens that the point cloud contains noise -and artifact that one would like to remove. This tutorial address outlier removal feature. - -.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - - -Prepare input data -===================================== - -A point cloud is loaded and downsampled using ``voxel_downsample``. - -.. 
literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py - :language: python - :lineno-start: 22 - :lines: 22-28 - :linenos: - -.. image:: ../../_static/Advanced/pointcloud_outlier_removal/voxel_down_sample.png - :width: 400px - -For comparison, ``uniform_down_sample`` can downsample point cloud by collecting every n-th points. - -.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py - :language: python - :lineno-start: 30 - :lines: 30-32 - :linenos: - -.. image:: ../../_static/Advanced/pointcloud_outlier_removal/uniform_down_sample.png - :width: 400px - -Select by index -===================================== - -The helper function uses ``select_by_index`` that takes binary mask to output only the selected points. -The selected points and the non-selected points are visualized. - -.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py - :language: python - :lineno-start: 10 - :lines: 10-17 - :linenos: - - -Statistical outlier removal -===================================== - -.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py - :language: python - :lineno-start: 34 - :lines: 34-37 - :linenos: - -``statistical_outlier_removal`` removes points that are further away from their neighbors compared to the average for the point cloud. It takes two input parameters: - - + ``nb_neighbors`` allows to specify how many neighbors are taken into account in order to calculate the average distance for a given point. - + ``std_ratio`` allows to set the threshold level based on the standard deviation of the average distances across the point cloud. The lower this number the more aggressive the filter will be. - -.. image:: ../../_static/Advanced/pointcloud_outlier_removal/statistical_outlier_removal.png - :width: 400px - -Radius outlier removal -===================================== - -.. 
literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py - :language: python - :lineno-start: 40 - :lines: 39-41 - :linenos: - -``radius_outlier_removal`` removes points that have few neighbors in a given sphere around them. Two parameters can be used to tune the filter to your data: - - + ``nb_points`` lets you pick the minimum amount of points that the sphere should contain - + ``radius`` defines the radius of the sphere that will be used for counting the neighbors. - -.. image:: ../../_static/Advanced/pointcloud_outlier_removal/radius_outlier_removal.png - :width: 400px diff --git a/docs/tutorial/Advanced/rgbd_integration.rst b/docs/tutorial/Advanced/rgbd_integration.rst deleted file mode 100644 index d6ad3989961..00000000000 --- a/docs/tutorial/Advanced/rgbd_integration.rst +++ /dev/null @@ -1,93 +0,0 @@ -.. _rgbd_integration: - -RGBD integration -------------------------------------- - -Open3D implements a scalable RGBD image integration algorithm. The algorithm is -based on the technique presented in [Curless1996]_ and [Newcombe2011]_. In order -to support large scenes, we use a hierarchical hashing structure introduced in -`Integrater in ElasticReconstruction `_. - -.. literalinclude:: ../../../examples/Python/Advanced/rgbd_integration.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -.. _log_file_format: - -Read trajectory from .log file -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/rgbd_integration.py - :language: python - :lineno-start: 12 - :lines: 12 - :linenos: - -This tutorial uses function ``read_trajectory`` to read a camera trajectory -from `a .log file `_. A sample -``.log`` file is as follows. - -.. 
code-block:: sh - - # examples/TestData/RGBD/odometry.log - 0 0 1 - 1 0 0 2 - 0 1 0 2 - 0 0 1 -0.3 - 0 0 0 1 - 1 1 2 - 0.999988 3.08668e-005 0.0049181 1.99962 - -8.84184e-005 0.999932 0.0117022 1.97704 - -0.0049174 -0.0117024 0.999919 -0.300486 - 0 0 0 1 - : - -.. _tsdf_volume_integration: - -TSDF volume integration -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Advanced/rgbd_integration.py - :language: python - :lineno-start: 13 - :lines: 13-30 - :linenos: - -Open3D provides two types of TSDF volumes: ``UniformTSDFVolume`` and -``ScalableTSDFVolume``. The latter is recommended since it uses a hierarchical -structure and thus supports larger scenes. - -``ScalableTSDFVolume`` has several parameters. ``voxel_length = 4.0 / 512.0`` -means a single voxel size for TSDF volume is -:math:`\frac{4.0m}{512.0} = 7.8125mm`. Lowering this value makes a -high-resolution TSDF volume, but the integration result can be susceptible to -depth noise. ``sdf_trunc = 0.04`` specifies truncation value for signed -distance function (SDF). When ``color_type = TSDFVolumeColorType.RGB8``, 8 bit -RGB color is also integrated as part of the TSDF volume. Float type intensity -can be integrated with ``color_type = TSDFVolumeColorType.Gray32`` and -``convert_rgb_to_intensity = True``. The color integration is inspired by -`PCL `_. - -.. _extract_a_mesh: - -Extract a mesh -`````````````````````````````````````` - -Mesh extraction uses the marching cubes algorithm [LorensenAndCline1987]_. - -.. literalinclude:: ../../../examples/Python/Advanced/rgbd_integration.py - :language: python - :lineno-start: 32 - :lines: 32-35 - :linenos: - -Outputs: - -.. image:: ../../_static/Advanced/rgbd_integration/integrated.png - :width: 400px - -.. Note:: TSDF volume works like weighted average filter in 3D space. If more - frames are integrated, the volume produces smoother and nicer mesh. Please - check :ref:`reconstruction_system_make_fragments` for more examples. 
diff --git a/docs/tutorial/Basic/file_io.rst b/docs/tutorial/Basic/file_io.rst deleted file mode 100644 index 9c91453f72e..00000000000 --- a/docs/tutorial/Basic/file_io.rst +++ /dev/null @@ -1,114 +0,0 @@ -.. _file_io: - -File IO -------------------------------------- - -This tutorial shows how basic geometries are read and written by Open3D. - -.. literalinclude:: ../../../examples/Python/Basic/file_io.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -.. _io_point_cloud: - -Point cloud -===================================== - -This script reads and writes a point cloud. - -.. literalinclude:: ../../../examples/Python/Basic/file_io.py - :language: python - :lineno-start: 11 - :lines: 11-14 - :linenos: - -``print()`` function can be used for displaying a summary of ``pcd``. Output message is below: - -.. code-block:: sh - - Testing IO for point cloud ... - PointCloud with 113662 points. - -By default, Open3D tries to infer the file type by the filename extension. Below is -a list of supported point cloud file types. 
- -========== ======================================================================================= -Format Description -========== ======================================================================================= -``xyz`` Each line contains ``[x, y, z]``, where ``x, y, z`` are the 3D coordinates -``xyzn`` Each line contains ``[x, y, z, nx, ny, nz]``, where ``nx, ny, nz`` - are the normals -``xyzrgb`` Each line contains ``[x, y, z, r, g, b]``, - where ``r, g, b`` are in floats of range ``[0, 1]`` -``pts`` | The first line is an integer representing the number of points - | Each subsequent line contains ``[x, y, z, i, r, g, b]``, - where ``r, g, b`` are in ```uint8``` -``ply`` See `Polygon File Format `_, - the ``ply`` file can contain both point cloud and mesh -``pcd`` See `Point Cloud Data `_ -========== ======================================================================================= - -It's also possible to specify the file type explicitly. In this case, the file -extension will be ignored. - -.. code-block:: python - - pcd = o3d.io.read_point_cloud("my_points.txt", format='xyz') - - -.. _io_mesh: - -Mesh -===================================== - -This script reads and writes a mesh. - -.. literalinclude:: ../../../examples/Python/Basic/file_io.py - :language: python - :lineno-start: 16 - :lines: 16-19 - :linenos: - -Compared to the data structure of point cloud, mesh has triangles that define surface. - -.. code-block:: sh - - Testing IO for meshes ... - TriangleMesh with 1440 points and 2880 triangles. - -By default, Open3D tries to infer the file type by the filename extension. Below is -a list of supported triangle mesh file types. 
- -========== ======================================================================================= -Format Description -========== ======================================================================================= -``ply`` See `Polygon File Format `_, - the ``ply`` file can contain both point cloud and mesh -``stl`` See `StereoLithography `_ -``obj`` See `Object Files `_ -``off`` See `Object File Format `_ -``gltf`` See `GL Transmission Format `_ -========== ======================================================================================= - -.. _io_image: - -Image -===================================== - -This script reads and writes an image. - -.. literalinclude:: ../../../examples/Python/Basic/file_io.py - :language: python - :lineno-start: 21 - :lines: 21-24 - :linenos: - -Size of image is readily displayed using ``print(img)``. - -.. code-block:: sh - - Testing IO for images ... - Image of size 512x512, with 3 channels. - Use numpy.asarray to access buffer data. diff --git a/docs/tutorial/Basic/icp_registration.rst b/docs/tutorial/Basic/icp_registration.rst deleted file mode 100644 index 3b0ee7fd41e..00000000000 --- a/docs/tutorial/Basic/icp_registration.rst +++ /dev/null @@ -1,163 +0,0 @@ -.. _icp_registration: - -ICP registration -------------------------------------- - -This tutorial demonstrates the ICP (Iterative Closest Point) registration algorithm. It has been a mainstay of geometric registration in both research and industry for many years. The input are two point clouds and an initial transformation that roughly aligns the source point cloud to the target point cloud. The output is a refined transformation that tightly aligns the two point clouds. A helper function ``draw_registration_result`` visualizes the alignment during the registration process. In this tutorial, we show two ICP variants, the point-to-point ICP and the point-to-plane ICP [Rusinkiewicz2001]_. - -.. 
literalinclude:: ../../../examples/Python/Basic/icp_registration.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -.. _visualize_registration: - -Helper visualization function -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/icp_registration.py - :language: python - :lineno-start: 12 - :lines: 12-18 - :linenos: - -This function visualizes a target point cloud, and a source point cloud transformed with an alignment transformation. The target point cloud and the source point cloud are painted with cyan and yellow colors respectively. The more and tighter the two point clouds overlap with each other, the better the alignment result is. - -.. note:: Since functions ``transform`` and ``paint_uniform_color`` change the point cloud, we call ``copy.deepcopy`` to make copies and protect the original point clouds. - -Input -=================== - -.. literalinclude:: ../../../examples/Python/Basic/icp_registration.py - :language: python - :lineno-start: 22 - :lines: 22-28 - :linenos: - -This script reads a source point cloud and a target point cloud from two files. A rough transformation is given. - -.. note:: The initial alignment is usually obtained by a global registration algorithm. See :ref:`global_registration` for examples. - -.. image:: ../../_static/Basic/icp/initial.png - :width: 400px - -.. literalinclude:: ../../../examples/Python/Basic/icp_registration.py - :language: python - :lineno-start: 29 - :lines: 29-32 - :linenos: - -Function ``evaluate_registration`` calculates two main metrics. ``fitness`` measures the overlapping area (# of inlier correspondences / # of points in target). Higher the better. ``inlier_rmse`` measures the RMSE of all inlier correspondences. Lower the better. - -.. code-block:: sh - - Initial alignment - RegistrationResult with fitness = 0.174723, inlier_rmse = 0.011771, - and correspondence_set size of 34741 - Access transformation to get result. - - -.. 
_point_to_point_icp: - -Point-to-point ICP -===================================== - -In general, the ICP algorithm iterates over two steps: - - 1. Find correspondence set :math:`\mathcal{K}=\{(\mathbf{p}, \mathbf{q})\}` from target point cloud :math:`\mathbf{P}`, and source point cloud :math:`\mathbf{Q}` transformed with current transformation matrix :math:`\mathbf{T}`. - 2. Update the transformation :math:`\mathbf{T}` by minimizing an objective function :math:`E(\mathbf{T})` defined over the correspondence set :math:`\mathcal{K}`. - -Different variants of ICP use different objective functions :math:`E(\mathbf{T})` [BeslAndMcKay1992]_ [ChenAndMedioni1992]_ [Park2017]_. - -We first show a point-to-point ICP algorithm [BeslAndMcKay1992]_ using an objective - -.. math:: E(\mathbf{T}) = \sum_{(\mathbf{p},\mathbf{q})\in\mathcal{K}}\|\mathbf{p} - \mathbf{T}\mathbf{q}\|^{2}. - -.. literalinclude:: ../../../examples/Python/Basic/icp_registration.py - :language: python - :lineno-start: 34 - :lines: 34-42 - :linenos: - -Class ``TransformationEstimationPointToPoint`` provides functions to compute the residuals and Jacobian matrices of the point-to-point ICP objective. Function ``registration_icp`` takes it as a parameter and runs point-to-point ICP to obtain results. - -.. image:: ../../_static/Basic/icp/point_to_point.png - :width: 400px - -.. code-block:: sh - - Apply point-to-point ICP - RegistrationResult with fitness = 0.372450, inlier_rmse = 0.007760, - and correspondence_set size of 74056 - Access transformation to get result. - Transformation is: - [[ 0.83924644 0.01006041 -0.54390867 0.64639961] - [-0.15102344 0.96521988 -0.21491604 0.75166079] - [ 0.52191123 0.2616952 0.81146378 -1.50303533] - [ 0. 0. 0. 1. ]] - -The ``fitness`` score increases from 0.174723 to 0.372450. The ``inlier_rmse`` reduces from 0.011771 to 0.007760. By default, ``registration_icp`` runs until convergence or reaches a maximum number of iterations (30 by default). 
It can be changed to allow more computation time and to improve the results further. - -.. code-block:: python - - reg_p2p = registration_icp(source, target, threshold, trans_init, - TransformationEstimationPointToPoint(), - ICPConvergenceCriteria(max_iteration = 2000)) - -Outputs: - -.. image:: ../../_static/Basic/icp/point_to_point_2000.png - :width: 400px - -.. code-block:: sh - - Apply point-to-point ICP - RegistrationResult with fitness = 0.621123, inlier_rmse = 0.006583, - and correspondence_set size of 123501 - Access transformation to get result. - Transformation is: - [[ 0.84024592 0.00687676 -0.54241281 0.6463702 ] - [-0.14819104 0.96517833 -0.21706206 0.81180074] - [ 0.52111439 0.26195134 0.81189372 -1.48346821] - [ 0. 0. 0. 1. ]] - -The ICP algorithm took 144 iterations until convergence. The final alignment is tight. The ``fitness`` score improves to 0.621123. The ``inlier_rmse`` reduces to 0.006583. - -.. _point_to_plane_icp: - -Point-to-plane ICP -===================================== - -The point-to-plane ICP algorithm [ChenAndMedioni1992]_ uses a different objective function - -.. math:: E(\mathbf{T}) = \sum_{(\mathbf{p},\mathbf{q})\in\mathcal{K}}\big((\mathbf{p} - \mathbf{T}\mathbf{q})\cdot\mathbf{n}_{\mathbf{p}}\big)^{2}, - -where :math:`\mathbf{n}_{\mathbf{p}}` is the normal of point :math:`\mathbf{p}`. [Rusinkiewicz2001]_ has shown that the point-to-plane ICP algorithm has a faster convergence speed than the point-to-point ICP algorithm. - -.. literalinclude:: ../../../examples/Python/Basic/icp_registration.py - :language: python - :lineno-start: 44 - :lines: 44-52 - :linenos: - -``registration_icp`` is called with a different parameter ``TransformationEstimationPointToPlane``. Internally, this class implements functions to compute the residuals and Jacobian matrices of the point-to-plane ICP objective. - -.. note:: The point-to-plane ICP algorithm uses point normals. In this tutorial, we load normals from files. 
If normals are not given, they can be computed with :ref:`vertex_normal_estimation`. - -.. image:: ../../_static/Basic/icp/point_to_plane.png - :width: 400px - -.. code-block:: sh - - Apply point-to-plane ICP - RegistrationResult with fitness = 0.620972, inlier_rmse = 0.006581, - and correspondence_set size of 123471 - Access transformation to get result. - Transformation is: - [[ 0.84023324 0.00618369 -0.54244126 0.64720943] - [-0.14752342 0.96523919 -0.21724508 0.81018928] - [ 0.52132423 0.26174429 0.81182576 -1.48366001] - [ 0. 0. 0. 1. ]] - -The point-to-plane ICP reaches tight alignment within 30 iterations (``fitness`` 0.620972 and ``inlier_rmse`` 0.006581). diff --git a/docs/tutorial/Basic/index.rst b/docs/tutorial/Basic/index.rst index 6a59ee48723..0c99f42d6df 100644 --- a/docs/tutorial/Basic/index.rst +++ b/docs/tutorial/Basic/index.rst @@ -11,7 +11,8 @@ interface of Open3D. file_io pointcloud mesh - rgbd_images/index + transformation + rgbd_image rgbd_odometry visualization kdtree diff --git a/docs/tutorial/Basic/jupyter.rst b/docs/tutorial/Basic/jupyter.rst deleted file mode 100644 index b75ba675d4e..00000000000 --- a/docs/tutorial/Basic/jupyter.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. _jupyter: - - -Jupyter Visualization -===================== - -Since version ``0.4.0``, we added experimental support for Jupyter -visualization with WebGL. If Open3D is installed from `pip` or `conda` -repository, Jupyter supported is enabled by default. If Open3D is compiled from -source, please refer to :ref:`compilation_ubuntu_python_binding` on how to build -Open3D with Jupyter visualization support. - -Note that Jupyter Visualization is still at an early experimental stage. Here -are the main limitations: - -1. Only point cloud geometry is supported. -2. Camera is initialized with fixed parameters, therefore, the initial view - may not be optimal for the point cloud. -3. Performance is not optimized. 
- -Controls --------- - -- Mouse wheel: zoom in/out -- Left mouse button drag: rotate axis -- Right mouse button drag: panning - -Example usage -------------- - -Jupyter visualizer is defined in the ``JVisualizer`` class. -Initialize the class, call ``add_geometry`` to add an Open3D -geometry, and then call the ``show`` to display the Jupyter widgets. - -.. code-block:: python - - import numpy as np - import open3d as o3 - from open3d import JVisualizer - - pts_path = "examples/TestData/fragment.ply" - fragment = o3.read_point_cloud(pts_path) - visualizer = JVisualizer() - visualizer.add_geometry(fragment) - visualizer.show() - -If the point cloud is not visible (due to the fixed camera initialization), -try first zooming in/out with mouse wheel, and dragging right button of the -mouse to pan. - -Here's a `link `_ -to the example outputs. diff --git a/docs/tutorial/Basic/kdtree.rst b/docs/tutorial/Basic/kdtree.rst deleted file mode 100644 index 9bcb703110b..00000000000 --- a/docs/tutorial/Basic/kdtree.rst +++ /dev/null @@ -1,74 +0,0 @@ -.. _kdtree: - -KDTree -------------------------------------- - -Open3D uses `FLANN `_ to build KDTrees for fast retrieval of nearest neighbors. - -.. literalinclude:: ../../../examples/Python/Basic/kdtree.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -.. _build_kdtree_from_pointcloud: - -Build KDTree from point cloud -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/kdtree.py - :language: python - :lineno-start: 12 - :lines: 12-16 - :linenos: - -This script reads a point cloud and builds a KDTree. This is a preprocessing step for the following nearest neighbor queries. - -.. _find_neighboring_points: - -Find neighboring points -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/kdtree.py - :language: python - :lineno-start: 19 - :lines: 18-19 - :linenos: - -We pick the 1500-th point as the anchor point and paint it red. 
- -Using search_knn_vector_3d -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Basic/kdtree.py - :language: python - :lineno-start: 22 - :lines: 21-23 - :linenos: - -Function ``search_knn_vector_3d`` returns a list of indices of the k nearest neighbors of the anchor point. These neighboring points are painted with blue color. Note that we convert ``pcd.colors`` to a numpy array to make batch access to the point colors, and broadcast a blue color [0, 0, 1] to all the selected points. We skip the first index since it is the anchor point itself. - - -Using search_radius_vector_3d -`````````````````````````````````````` - -.. literalinclude:: ../../../examples/Python/Basic/kdtree.py - :language: python - :lineno-start: 26 - :lines: 25-27 - :linenos: - -Similarly, we can use ``search_radius_vector_3d`` to query all points with distances to the anchor point less than a given radius. We paint these points with green color. - -.. literalinclude:: ../../../examples/Python/Basic/kdtree.py - :language: python - :lineno-start: 30 - :lines: 29-31 - :linenos: - -The visualization looks like: - -.. image:: ../../_static/Basic/kdtree/kdtree.png - :width: 400px - -.. Note:: Besides the KNN search ``search_knn_vector_3d`` and the RNN search ``search_radius_vector_3d``, Open3D provides a hybrid search function ``search_hybrid_vector_3d``. It returns at most k nearest neighbors that have distances to the anchor point less than a given radius. This function combines the criteria of KNN search and RNN search. It is known as RKNN search in some literatures. It has performance benefits in many practical cases, and is heavily used in a number of Open3D functions. diff --git a/docs/tutorial/Basic/mesh.rst b/docs/tutorial/Basic/mesh.rst deleted file mode 100644 index c47e485b6b1..00000000000 --- a/docs/tutorial/Basic/mesh.rst +++ /dev/null @@ -1,116 +0,0 @@ -.. 
_mesh: - -Mesh -------------------------------------- - -Open3D has a data structure for triangle mesh. - -.. literalinclude:: ../../../examples/Python/Basic/mesh.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - - -.. _print_vertices_and_triangles: - -Print vertices and triangles -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/mesh.py - :language: python - :lineno-start: 13 - :lines: 13-18 - :linenos: - -Outputs: - -.. code-block:: sh - - TriangleMesh with 1440 points and 2880 triangles. - [[ 4.51268387 28.68865967 -76.55680847] - [ 7.63622284 35.52046967 -69.78063965] - [ 6.21986008 44.22465134 -64.82303619] - ..., - [-22.12651634 31.28466606 -87.37570953] - [-13.91188431 25.4865818 -86.25827026] - [ -5.27768707 23.36245346 -81.43279266]] - [[ 0 12 13] - [ 0 13 1] - [ 1 13 14] - ..., - [1438 11 1439] - [1439 11 0] - [1439 0 1428]] - -The ``TriangleMesh`` class has a few data fields such as ``vertices`` and ``triangles``. Open3D provides direct memory access to these fields via numpy array. - -.. _visualize_3d_mesh: - -Visualize 3D mesh -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/mesh.py - :language: python - :lineno-start: 20 - :lines: 20-24 - :linenos: - -The GUI visualizes a mesh. - -.. image:: ../../_static/Basic/mesh/without_shading.png - :width: 400px - -You can rotate and move the mesh but it is painted with uniform gray color and does not look "3d". The reason is that the current mesh does not have normals for vertices or faces. So uniform color shading is used instead of a more sophisticated Phong shading. - -.. _surface_normal_estimation: - -Surface normal estimation -===================================== - -Let's draw the mesh with surface normal. - -.. 
literalinclude:: ../../../examples/Python/Basic/mesh.py - :language: python - :lineno-start: 26 - :lines: 26-29 - :linenos: - -It uses ``compute_vertex_normals`` and ``paint_uniform_color`` which are member functions of ``mesh``. -Now it looks like: - -.. image:: ../../_static/Basic/mesh/with_shading.png - :width: 400px - -Crop mesh -===================================== - -We remove half of the surface by directly operate on the ``triangle`` and ``triangle_normals`` data fields of the mesh. This is done via numpy array. - -.. literalinclude:: ../../../examples/Python/Basic/mesh.py - :language: python - :lineno-start: 31 - :lines: 31-39 - :linenos: - -Outputs: - -.. image:: ../../_static/Basic/mesh/half.png - :width: 400px - - -Paint mesh -===================================== - -Painting mesh is the same as how it worked for point cloud. - -.. literalinclude:: ../../../examples/Python/Basic/mesh.py - :language: python - :lineno-start: 41 - :lines: 41-43 - :linenos: - -Outputs: - -.. image:: ../../_static/Basic/mesh/half_color.png - :width: 400px diff --git a/docs/tutorial/Basic/pointcloud.rst b/docs/tutorial/Basic/pointcloud.rst deleted file mode 100644 index 574c8dacd69..00000000000 --- a/docs/tutorial/Basic/pointcloud.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. _pointcloud: - -Point cloud -------------------------------------- - -This tutorial demonstrates basic usage of a point cloud. - -.. literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -.. _visualize_point_cloud: - -Visualize point cloud -===================================== - -The first part of the tutorial reads a point cloud and visualizes it. - -.. literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 12 - :lines: 12-16 - :linenos: - -``read_point_cloud`` reads a point cloud from a file. It tries to decode the file based on the extension name. 
The supported extension names are: ``pcd``, ``ply``, ``xyz``, ``xyzrgb``, ``xyzn``, ``pts``. - -``draw_geometries`` visualizes the point cloud. -Use mouse/trackpad to see the geometry from different view point. - -.. image:: ../../_static/Basic/pointcloud/scene.png - :width: 400px - -It looks like a dense surface, but it is actually a point cloud rendered as surfels. The GUI supports various keyboard functions. One of them, the :kbd:`-` key reduces the size of the points (surfels). Press it multiple times, the visualization becomes: - -.. image:: ../../_static/Basic/pointcloud/scene_small.png - :width: 400px - -.. note:: Press :kbd:`h` key to print out a complete list of keyboard instructions for the GUI. For more information of the visualization GUI, refer to :ref:`visualization` and :ref:`customized_visualization`. - -.. note:: On OS X, the GUI window may not receive keyboard event. In this case, try to launch Python with ``pythonw`` instead of ``python``. - -.. _voxel_downsampling: - -Voxel downsampling -===================================== - -Voxel downsampling uses a regular voxel grid to create a uniformly downsampled point cloud from an input point cloud. It is often used as a pre-processing step for many point cloud processing tasks. The algorithm operates in two steps: - -1. Points are bucketed into voxels. -2. Each occupied voxel generates exact one point by averaging all points inside. - -.. literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 18 - :lines: 18-20 - :linenos: - -This is the downsampled point cloud: - -.. image:: ../../_static/Basic/pointcloud/downsampled.png - :width: 400px - -.. _vertex_normal_estimation: - -Vertex normal estimation -===================================== - -Another basic operation for point cloud is point normal estimation. - -.. 
literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 22 - :lines: 22-25 - :linenos: - -``estimate_normals`` computes normal for every point. The function finds adjacent points and calculate the principal axis of the adjacent points using covariance analysis. - -The function takes an instance of ``KDTreeSearchParamHybrid`` class as an argument. The two key arguments ``radius = 0.1`` and ``max_nn = 30`` specifies search radius and maximum nearest neighbor. It has 10cm of search radius, and only considers up to 30 neighbors to save computation time. - -.. note:: The covariance analysis algorithm produces two opposite directions as normal candidates. Without knowing the global structure of the geometry, both can be correct. This is known as the normal orientation problem. Open3D tries to orient the normal to align with the original normal if it exists. Otherwise, Open3D does a random guess. Further orientation functions such as ``orient_normals_to_align_with_direction`` and ``orient_normals_towards_camera_location`` need to be called if the orientation is a concern. - -Use ``draw_geometries`` to visualize the point cloud and press :kbd:`n` to see point normal. Key :kbd:`-` and key :kbd:`+` can be used to control the length of the normal. - -.. image:: ../../_static/Basic/pointcloud/downsampled_normal.png - :width: 400px - - -Access estimated vertex normal -===================================== - -Estimated normal vectors can be retrieved by ``normals`` variable of ``downpcd``. - -.. literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 29 - :lines: 27-28 - :linenos: - -.. code-block:: sh - - Print a normal vector of 0th point - [-0.27566603 -0.89197839 -0.35830543] - -To check out other variables, please use ``help(downpcd)``. -Normal vectors can be transformed as a numpy array using ``np.asarray``. - -.. 
literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 31 - :lines: 29-30 - :linenos: - -.. code-block:: sh - - Print the first 100 normal vectors - [[-0.27566603 -0.89197839 -0.35830543] - [-0.00694405 -0.99478075 -0.10179902] - [-0.00399871 -0.99965423 -0.02598917] - [-0.46344316 -0.68643798 -0.56037785] - [-0.43476205 -0.62438493 -0.64894177] - [-0.51440078 -0.56093481 -0.6486478 ] - [-0.27498453 -0.67317361 -0.68645524] - [-0.00327304 -0.99977409 -0.02100143] - [-0.01464332 -0.99960281 -0.02407874]] - -Check :ref:`working_with_numpy` for more examples regarding numpy array. - - -.. _crop_point_cloud: - -Crop point cloud -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 35 - :lines: 33-38 - :linenos: - -``read_selection_polygon_volume`` reads a json file that specifies polygon selection area. -``vol.crop_point_cloud(pcd)`` filters out points. Only the chair remains. - -.. image:: ../../_static/Basic/pointcloud/crop.png - :width: 400px - -.. _paint_point_cloud: - -Paint point cloud -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/pointcloud.py - :language: python - :lineno-start: 42 - :lines: 40-43 - :linenos: - -``paint_uniform_color`` paints all the points to a uniform color. The color is in RGB space, [0, 1] range. - -.. image:: ../../_static/Basic/pointcloud/crop_color.png - :width: 400px diff --git a/docs/tutorial/Basic/python_interface.rst b/docs/tutorial/Basic/python_interface.rst deleted file mode 100644 index bd75b00b351..00000000000 --- a/docs/tutorial/Basic/python_interface.rst +++ /dev/null @@ -1,134 +0,0 @@ -.. _python_interface_tutorial: - -Python interface ----------------- - -For the C++ interface see :ref:`cplusplus_interface_tutorial`. 
- - -Install open3d Python package -============================= - -For installing Open3D Python package, see :ref:`install_open3d_python`. - - -Install open3d from source -========================== - -For installing from source, see :ref:`compilation`. - -.. _import_open3d_module: - -Import open3d module -==================== - -This tutorial shows how to import ``open3d`` module and print out help information. -For trouble shooting, see :ref:`compilation_ubuntu_python_binding`. - -.. literalinclude:: ../../../examples/Python/Basic/python_binding.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -This scripts has two functions: ``example_help_function`` and ``example_import_all`` -that show very basic usage of Open3D Python module. - -.. note:: Depending on environment, the name of Python library may not be ``open3d.so``. Regardless of the file name, ``import open3d`` should work. - -.. literalinclude:: ../../../examples/Python/Basic/python_binding.py - :language: python - :lineno-start: 10 - :lines: 10-12 - :linenos: - -This imports ``read_point_cloud`` function from ``open3d`` module. It reads a point cloud file and returns an instance of ``PointCloud`` class. ``print(pcd)`` prints brief information of the point cloud: - -.. code-block:: sh - - PointCloud with 198835 points. - - -.. _using_builtin_help_function: - -Using built-in help function -```````````````````````````` - -It is recommended to use Python built-in ``help`` function to get definitions and instructions of Open3D functions and classes. For example, - -.. literalinclude:: ../../../examples/Python/Basic/python_binding.py - :language: python - :lineno-start: 15 - :lines: 15-18 - :linenos: - - -Browse open3d -````````````` - -``help(open3d)`` prints documents of ``open3d`` module. - -.. 
code-block:: sh - - Help on module open3d: - - NAME - open3d - Python binding of Open3D - - FILE - /Users/myaccount/Open3D/build/lib/open3d.so - - CLASSES - __builtin__.object - CorrespondenceChecker - CorrespondenceCheckerBasedOnDistance - CorrespondenceCheckerBasedOnEdgeLength - CorrespondenceCheckerBasedOnNormal - DoubleVector - Feature - Geometry - Geometry2D - Image - Geometry3D - PointCloud - TriangleMesh - : - - -Description of a class in open3d -```````````````````````````````` - -``help(open3d.PointCloud)`` provides description of ``PointCloud`` class. - -.. code-block:: sh - - Help on class PointCloud in module open3d: - - class PointCloud(Geometry3D) - | Method resolution order: - | PointCloud - | Geometry3D - | Geometry - | __builtin__.object - | - | Methods defined here: - | - | __add__(...) - | __add__(self: open3d.PointCloud, arg0: open3d.PointCloud) -> open3d.PointCloud - | - : - - -Description of a function in open3d -``````````````````````````````````` - -``help(open3d.read_point_cloud)`` provides description of input argument and return type of ``read_point_cloud`` function. - -.. code-block:: sh - - Help on built-in function read_point_cloud in module open3d: - - read_point_cloud(...) - read_point_cloud(filename: unicode) -> open3d.PointCloud - - Function to read PointCloud from file diff --git a/docs/tutorial/Basic/rgbd_images/index.rst b/docs/tutorial/Basic/rgbd_images/index.rst deleted file mode 100644 index 67e82fe23c8..00000000000 --- a/docs/tutorial/Basic/rgbd_images/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -RGBD images -=================================================================== - -Open3D has a data structure for images. It supports various functions such as ``read_image``, ``write_image``, ``filter_image`` and ``draw_geometries``. An Open3D ``Image`` can be directly converted to/from a numpy array. - -An Open3D ``RGBDImage`` is composed of two images, ``RGBDImage.depth`` and ``RGBDImage.color``. 
We require the two images to be registered into the same camera frame and have the same resolution. The following tutorials show how to read and use RGBD images from a number of well known RGBD datasets. - -.. toctree:: - - redwood - sun - nyu - tum diff --git a/docs/tutorial/Basic/rgbd_images/nyu.rst b/docs/tutorial/Basic/rgbd_images/nyu.rst deleted file mode 100644 index 59a6ce3cf6d..00000000000 --- a/docs/tutorial/Basic/rgbd_images/nyu.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. _rgbd_nyu: - -NYU dataset -------------------------------------- -This tutorial reads and visualizes an ``RGBDImage`` from `the NYU dataset `_ [Silberman2012]_. - -.. literalinclude:: ../../../../examples/Python/Basic/rgbd_nyu.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -This tutorial is almost the same as the tutorial processing :ref:`rgbd_redwood`, with two differences. First, NYU images are not in standard ``jpg`` or ``png`` formats. Thus, we use ``mpimg.imread`` to read the color image as a numpy array and convert it to an Open3D ``Image``. An additional helper function ``read_nyu_pgm`` is called to read depth images from the special big endian ``pgm`` format used in the NYU dataset. Second, we use a different conversion function ``create_rgbd_image_from_nyu_format`` to parse depth images in the SUN dataset. - -Similarly, the RGBDImage can be rendered as numpy arrays: - -.. image:: ../../../_static/Basic/rgbd_images/nyu_rgbd.png - :width: 400px - -Or a point cloud: - -.. image:: ../../../_static/Basic/rgbd_images/nyu_pcd.png - :width: 400px diff --git a/docs/tutorial/Basic/rgbd_images/redwood.rst b/docs/tutorial/Basic/rgbd_images/redwood.rst deleted file mode 100644 index b6ed8ba9727..00000000000 --- a/docs/tutorial/Basic/rgbd_images/redwood.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. _rgbd_redwood: - -Redwood dataset -------------------------------------- -This tutorial reads and visualizes an ``RGBDImage`` from `the Redwood dataset `_ [Choi2015]_. - -.. 
literalinclude:: ../../../../examples/Python/Basic/rgbd_redwood.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - - -The Redwood format stored depth in a 16-bit single channel image. The integer value represents the depth measurement in millimeters. It is the default format for Open3D to parse depth images. - -.. literalinclude:: ../../../../examples/Python/Basic/rgbd_redwood.py - :language: python - :lineno-start: 11 - :lines: 11-16 - :linenos: - -The default conversion function ``create_rgbd_image_from_color_and_depth`` creates an ``RGBDImage`` from a pair of color and depth image. The color image is converted into a grayscale image, stored in ``float`` ranged in [0, 1]. The depth image is stored in ``float``, representing the depth value in meters. ``print(rgbd_image)`` yields: - -.. code-block:: sh - - RGBDImage of size - Color image : 640x480, with 1 channels. - Depth image : 640x480, with 1 channels. - Use numpy.asarray to access buffer data. - -The converted images can be rendered as numpy arrays. - -.. literalinclude:: ../../../../examples/Python/Basic/rgbd_redwood.py - :language: python - :lineno-start: 18 - :lines: 18-24 - :linenos: - -Outputs: - -.. image:: ../../../_static/Basic/rgbd_images/redwood_rgbd.png - :width: 400px - -The RGBD image can be converted into a point cloud, given a set of camera parameters. - -.. literalinclude:: ../../../../examples/Python/Basic/rgbd_redwood.py - :language: python - :lineno-start: 26 - :lines: 26-32 - :linenos: - -Here we use ``PinholeCameraIntrinsicParameters.PrimeSenseDefault`` as default camera parameter. It has image resolution 640x480, focal length (fx, fy) = (525.0, 525.0), and optical center (cx, cy) = (319.5, 239.5). An identity matrix is used as the default extrinsic parameter. ``pcd.transform`` applies an up-down flip transformation on the point cloud for better visualization purpose. This outputs: - -.. 
image:: ../../../_static/Basic/rgbd_images/redwood_pcd.png - :width: 400px diff --git a/docs/tutorial/Basic/rgbd_images/sun.rst b/docs/tutorial/Basic/rgbd_images/sun.rst deleted file mode 100644 index 1e7214ddd7f..00000000000 --- a/docs/tutorial/Basic/rgbd_images/sun.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. _rgbd_sun: - -SUN dataset -------------------------------------- -This tutorial reads and visualizes an ``RGBDImage`` of `the SUN dataset `_ [Song2015]_. - -.. literalinclude:: ../../../../examples/Python/Basic/rgbd_sun.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -This tutorial is almost the same as the tutorial processing :ref:`rgbd_redwood`. The only difference is that we use conversion function ``create_rgbd_image_from_sun_format`` to parse depth images in the SUN dataset. - -Similarly, the ``RGBDImage`` can be rendered as numpy arrays: - -.. image:: ../../../_static/Basic/rgbd_images/sun_rgbd.png - :width: 400px - -Or a point cloud: - -.. image:: ../../../_static/Basic/rgbd_images/sun_pcd.png - :width: 400px diff --git a/docs/tutorial/Basic/rgbd_images/tum.rst b/docs/tutorial/Basic/rgbd_images/tum.rst deleted file mode 100644 index ad5a7172291..00000000000 --- a/docs/tutorial/Basic/rgbd_images/tum.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _rgbd_tum: - -TUM dataset -------------------------------------- -This tutorial reads and visualizes an ``RGBDImage`` from `the TUM dataset `_ [Strum2012]_. - -.. literalinclude:: ../../../../examples/Python/Basic/rgbd_tum.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - - -This tutorial is almost the same as the tutorial processing :ref:`rgbd_redwood`. The only difference is that we use conversion function ``create_rgbd_image_from_tum_format`` to parse depth images in the TUM dataset. - -Similarly, the ``RGBDImage`` can be rendered as numpy arrays: - -.. image:: ../../../_static/Basic/rgbd_images/tum_rgbd.png - :width: 400px - -Or a point cloud: - -.. 
image:: ../../../_static/Basic/rgbd_images/tum_pcd.png - :width: 400px diff --git a/docs/tutorial/Basic/rgbd_odometry.rst b/docs/tutorial/Basic/rgbd_odometry.rst deleted file mode 100644 index f15b8f84866..00000000000 --- a/docs/tutorial/Basic/rgbd_odometry.rst +++ /dev/null @@ -1,108 +0,0 @@ -.. _rgbd_odometry: - -RGBD odometry -------------------------------------- - -An RGBD odometry finds the camera movement between two consecutive RGBD image pairs. The input are two instances of ``RGBDImage``. The output is the motion in the form of a rigid body transformation. Open3D has implemented two RGBD odometries: [Steinbrucker2011]_ and [Park2017]_. - -.. literalinclude:: ../../../examples/Python/Basic/rgbd_odometry.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - - -.. _reading_camera_intrinsic: - -Read camera intrinsic -===================================== - -We first read the camera intrinsic matrix from a json file. - -.. literalinclude:: ../../../examples/Python/Basic/rgbd_odometry.py - :language: python - :lineno-start: 11 - :lines: 11-13 - :linenos: - -This yields: - -.. code-block:: sh - - [[ 525. 0. 319.5] - [ 0. 525. 239.5] - [ 0. 0. 1. ]] - - -.. Note:: Lots of small data structures in Open3D can be read from / written into ``json`` files. This includes camera intrinsics, camera trajectory, pose graph, etc. - -.. _reading_rgbd_image: - -Read RGBD image -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/rgbd_odometry.py - :language: python - :lineno-start: 15 - :lines: 15-24 - :linenos: - -This code block reads two pairs of RGBD images in the Redwood format. We refer to :ref:`rgbd_redwood` for a comprehensive explanation. - -.. note:: Open3D assumes the color image and depth image are synchronized and registered in the same coordinate frame. This can usually be done by turning on both the synchronization and registration features in the RGBD camera settings. - -.. 
_compute_odometry: - -Compute odometry from two RGBD image pairs -================================================== - -.. literalinclude:: ../../../examples/Python/Basic/rgbd_odometry.py - :language: python - :lineno-start: 30 - :lines: 30-37 - :linenos: - -This code block calls two different RGBD odometry methods. The first one is [Steinbrucker2011]_. It minimizes photo consistency of aligned images. The second one is [Park2017]_. In addition to photo consistency, it implements constraint for geometry. Both functions run in similar speed. But [Park2017]_ is more accurate in our test on benchmark datasets. It is recommended. - -Several parameters in ``OdometryOption()``: - -* ``minimum_correspondence_ratio`` : After alignment, measure the overlapping ratio of two RGBD images. If overlapping region of two RGBD image is smaller than specified ratio, the odometry module regards that this is a failure case. -* ``max_depth_diff`` : In depth image domain, if two aligned pixels have a depth difference less than specified value, they are considered as a correspondence. Larger value induce more aggressive search, but it is prone to unstable result. -* ``min_depth`` and ``max_depth`` : Pixels that has smaller or larger than specified depth values are ignored. - -.. _visualize_rgbd_image: - -Visualize RGBD image pairs -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/rgbd_odometry.py - :language: python - :lineno-start: 39 - :lines: 39-52 - :linenos: - -The RGBD image pairs are converted into point clouds and rendered together. Note that the point cloud representing the first (source) RGBD image is transformed with the transformation estimated by the odometry. After this transformation, both point clouds are aligned. - -Outputs: - -.. 
code-block:: sh - - Using RGB-D Odometry - [[ 9.99985131e-01 -2.26255547e-04 -5.44848980e-03 -4.68289761e-04] - [ 1.48026964e-04 9.99896965e-01 -1.43539723e-02 2.88993731e-02] - [ 5.45117608e-03 1.43529524e-02 9.99882132e-01 7.82593526e-04] - [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 1.00000000e+00]] - -.. image:: ../../_static/Basic/rgbd_odometry/color_term.png - :width: 400px - -.. code-block:: sh - - Using Hybrid RGB-D Odometry - [[ 9.99994666e-01 -1.00290715e-03 -3.10826763e-03 -3.75410348e-03] - [ 9.64492959e-04 9.99923448e-01 -1.23356675e-02 2.54977516e-02] - [ 3.12040122e-03 1.23326038e-02 9.99919082e-01 1.88139799e-03] - [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 1.00000000e+00]] - -.. image:: ../../_static/Basic/rgbd_odometry/hybrid_term.png - :width: 400px diff --git a/docs/tutorial/Basic/visualization.rst b/docs/tutorial/Basic/visualization.rst deleted file mode 100644 index f922e017665..00000000000 --- a/docs/tutorial/Basic/visualization.rst +++ /dev/null @@ -1,136 +0,0 @@ -.. _visualization: - -Visualization -------------------------------------- - -.. literalinclude:: ../../../examples/Python/Basic/visualization.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - -.. _function_draw_geometries: - -Function draw_geometries -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/visualization.py - :language: python - :lineno-start: 12 - :lines: 12-14 - :linenos: - -Open3D provides a convenient visualization function ``draw_geometries`` which takes a list of geometry objects (``PointCloud``, ``TriangleMesh``, or ``Image``), and renders them together. We have implemented many functions in the visualizer, such as rotation, translation, and scaling via mouse operations, changing rendering style, and screen capture. Press :kbd:`h` inside the window to print out a comprehensive list of functions. - -.. code-block:: sh - - -- Mouse view control -- - Left button + drag : Rotate. 
- Ctrl + left button + drag : Translate. - Wheel button + drag : Translate. - Shift + left button + drag : Roll. - Wheel : Zoom in/out. - - -- Keyboard view control -- - [/] : Increase/decrease field of view. - R : Reset view point. - Ctrl/Cmd + C : Copy current view status into the clipboard. - Ctrl/Cmd + V : Paste view status from clipboard. - - -- General control -- - Q, Esc : Exit window. - H : Print help message. - P, PrtScn : Take a screen capture. - D : Take a depth capture. - O : Take a capture of current rendering settings. - : - -.. Note:: In some operating systems (e.g., OS X), the visualization window may not respond to keyboard input. This is usually because the console retains the input focus instead of passing it to the visualization window. Calling ``pythonw visualization.py`` instead of ``python visualization.py`` will resolve this issue. - -.. Note:: In addition to ``draw_geometries``, Open3D has a set of sibling functions with more advanced functionality. ``draw_geometries_with_custom_animation`` allows the programmer to define a custom view trajectory and play an animation in the GUI. ``draw_geometries_with_animation_callback`` and ``draw_geometries_with_key_callback`` accept Python callback functions as input. The callback function is called in an automatic animation loop, or upon a key press event. See :ref:`customized_visualization` for details. - -.. _store_view_point: - -Store view point -===================================== - -In the beginning, the point cloud is rendered upside down. - -.. image:: ../../_static/Basic/visualization/badview.png - :width: 400px - -After adjusting view points using mouse left button + drag, we can reach a better view point. - -.. image:: ../../_static/Basic/visualization/color.png - :width: 400px - -To retain this view point, press :kbd:`ctrl+c`. The view point will be translated into a json string stored in clipboard. When you move the camera to a different view, such as: - -.. 
image:: ../../_static/Basic/visualization/newview.png - :width: 400px - -You can get back to the original view by pressing :kbd:`ctrl+v`. - -.. image:: ../../_static/Basic/visualization/color.png - :width: 400px - -.. _rendering_style: - -Rendering styles -===================================== - -Open3D ``Visualizer`` supports several rendering styles. For example, pressing :kbd:`l` will switch between a Phong lighting and a simple color rendering. Pressing :kbd:`2` shows points colored based on x-coordinate. - -.. image:: ../../_static/Basic/visualization/colormap_jet.png - :width: 400px - -The color map can also be adjusted by, for example, pressing :kbd:`shift+4`. This changes jet color map to hot color map. - -.. image:: ../../_static/Basic/visualization/colormap_hot.png - :width: 400px - -.. _geometry_primitives: - -Geometry primitives -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/visualization.py - :language: python - :lineno-start: 16 - :lines: 16-30 - :linenos: - -This script generates a cube, a sphere, and a cylinder using ``create_mesh_cubic``, ``create_mesh_sphere`` and -``create_mesh_cylinder``. The cube is painted in red, sphere is painted in blue, and the cylinder is painted in green. Normals are computed for both meshes to support the Phong shading (see :ref:`visualize_3d_mesh` and :ref:`surface_normal_estimation`). We can even create a coordinate axis using ``create_mesh_coordinate_frame``, with its origin point set at (-2, -2, -2). - -.. _draw_multiple_geometries: - -Draw multiple geometries -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/visualization.py - :language: python - :lineno-start: 32 - :lines: 32-38 - :linenos: - -``draw_geometries`` takes a list of geometries and renders them all together. Alternatively, ``TriangleMesh`` supports a ``+`` operator to combine multiple meshes into one. 
We recommend the first approach since it supports a combination of different geometries (e.g., a mesh can be rendered in tandem with a point cloud). - -.. image:: ../../_static/Basic/visualization/premitive.png - :width: 400px - -.. _draw_line_set: - -Draw line set -===================================== - -.. literalinclude:: ../../../examples/Python/Basic/visualization.py - :language: python - :lineno-start: 40 - :lines: 40-71 - :linenos: - -To draw lines, it is necessary to define ``LineSet`` and create a set of points and a set of edges. An edge is a pair of point indices. The above example creates custom ``points`` and edges (denoted as ``lines``) to make a cube. Color is optional - red color ``[1,0,0]`` is assigned to each edge in this example. This script visualizes the following cube. - -.. image:: ../../_static/Basic/visualization/lineset.png - :width: 400px diff --git a/docs/tutorial/Basic/working_with_numpy.rst b/docs/tutorial/Basic/working_with_numpy.rst deleted file mode 100644 index 24e1011a9f2..00000000000 --- a/docs/tutorial/Basic/working_with_numpy.rst +++ /dev/null @@ -1,96 +0,0 @@ -.. _working_with_numpy: - -Working with NumPy -------------------------------------- - -Data structure of Open3D is natively compatible with `NumPy `_ buffer. -The following tutorial generates a variant of sync function using NumPy and visualizes the function using Open3D. - -.. literalinclude:: ../../../examples/Python/Basic/working_with_numpy.py - :language: python - :lineno-start: 5 - :lines: 5- - :linenos: - - -The first part of the script generates a :math:`n \times 3` matrix ``xyz``. -Each column has :math:`x, y, z` value of a function :math:`z = \frac{sin (x^2+y^2)}{(x^2+y^2)}`. -:math:`z_{norm}` is normalized map of :math:`z` for [0,1] range. - -.. _from_numpy_to_open3d_pointcloud: - -From NumPy to open3d.PointCloud -=============================== - -.. 
literalinclude:: ../../../examples/Python/Basic/working_with_numpy.py - :language: python - :lineno-start: 25 - :lines: 25-28 - :linenos: - -Open3D provides conversion from NumPy matrix to a vector of 3D vectors. By using ``Vector3dVector``, NumPy matrix can be directly assigned for ``open3d.PointCloud.points``. - -In this manner, any similar data structure such as ``open3d.PointCloud.colors`` or ``open3d.PointCloud.normals`` can be assigned or modified using NumPy. The script saves the point cloud as a ply file for the next step. - - -.. _from_open3d_pointcloud_to_numpy: - -From open3d.PointCloud to NumPy -=============================== - -.. literalinclude:: ../../../examples/Python/Basic/working_with_numpy.py - :language: python - :lineno-start: 30 - :lines: 30-37 - :linenos: - -As shown in this example, ``Vector3dVector`` is converted into a NumPy array using ``np.asarray``. - -The tutorial script prints two identical matrices - -.. code-block:: sh - - xyz - [[-3.00000000e+00 -3.00000000e+00 -3.89817183e-17] - [-2.98500000e+00 -3.00000000e+00 -4.94631078e-03] - [-2.97000000e+00 -3.00000000e+00 -9.52804798e-03] - ... - [ 2.97000000e+00 3.00000000e+00 -9.52804798e-03] - [ 2.98500000e+00 3.00000000e+00 -4.94631078e-03] - [ 3.00000000e+00 3.00000000e+00 -3.89817183e-17]] - Writing PLY: [========================================] 100% - Reading PLY: [========================================] 100% - xyz_load - [[-3.00000000e+00 -3.00000000e+00 -3.89817183e-17] - [-2.98500000e+00 -3.00000000e+00 -4.94631078e-03] - [-2.97000000e+00 -3.00000000e+00 -9.52804798e-03] - ... - [ 2.97000000e+00 3.00000000e+00 -9.52804798e-03] - [ 2.98500000e+00 3.00000000e+00 -4.94631078e-03] - [ 3.00000000e+00 3.00000000e+00 -3.89817183e-17]] - -and visualizes the function: - -.. image:: ../../_static/Basic/working_with_numpy/sync_3d.png - :width: 400px - - -.. _from_numpy_to_open3d_image: - -From NumPy to open3d.Image -========================== - -2D Numpy matrix can be converted image. 
The following example converts ``z_norm`` into open3d.Image, -visualize the image using ``draw_geometries``, and save the image as a png format file. - -.. literalinclude:: ../../../examples/Python/Basic/working_with_numpy.py - :language: python - :lineno-start: 39 - :lines: 39-42 - :linenos: - -.. image:: ../../_static/Basic/working_with_numpy/sync_image.png - :width: 400px - -.. Note:: The conversion supports ``uint8``, ``uint16``, or ``float32`` with c_type storage (default NumPy behavior), - dim=2`` (width * height) or dim=3 (width * height * channel). diff --git a/docs/tutorial/ReconstructionSystem/integrate_scene.rst b/docs/tutorial/ReconstructionSystem/integrate_scene.rst index 39866cd29dd..6f26f5b295e 100644 --- a/docs/tutorial/ReconstructionSystem/integrate_scene.rst +++ b/docs/tutorial/ReconstructionSystem/integrate_scene.rst @@ -8,7 +8,7 @@ The final step of the system is to integrate all RGBD images into a single TSDF Input arguments `````````````````````````````````````` -The script runs with ``python run_system.py [config] --integrate``. In ``[config]``, ``["path_dataset"]`` should have subfolders *image* and *depth* in which frames are synchronized and aligned. In ``[config]``, the optional argument ``["path_intrinsic"]`` specifies path to a json file that has a camera intrinsic matrix (See :ref:`reading_camera_intrinsic` for details). If it is not given, the PrimeSense factory setting is used instead. +The script runs with ``python run_system.py [config] --integrate``. In ``[config]``, ``["path_dataset"]`` should have subfolders *image* and *depth* in which frames are synchronized and aligned. In ``[config]``, the optional argument ``["path_intrinsic"]`` specifies path to a json file that has a camera intrinsic matrix (See :ref:`/tutorial/Basic/rgbd_odometry.ipynb#read-camera-intrinsic` for details). If it is not given, the PrimeSense factory setting is used instead. 
Integrate RGBD frames `````````````````````````````````````` @@ -19,7 +19,7 @@ Integrate RGBD frames :lines: 5,17-54 :linenos: -This function first reads the alignment results from both :ref:`reconstruction_system_make_fragments` and :ref:`reconstruction_system_register_fragments`, then computes the pose of each RGBD image in the global space. After that, RGBD images are integrated using :ref:`rgbd_integration`. +This function first reads the alignment results from both :ref:`reconstruction_system_make_fragments` and :ref:`reconstruction_system_register_fragments`, then computes the pose of each RGBD image in the global space. After that, RGBD images are integrated using :ref:`/tutorial/Advanced/rgbd_integration.ipynb`. Results diff --git a/docs/tutorial/ReconstructionSystem/make_fragments.rst b/docs/tutorial/ReconstructionSystem/make_fragments.rst index a12e46b248e..d4c4aea5ea9 100644 --- a/docs/tutorial/ReconstructionSystem/make_fragments.rst +++ b/docs/tutorial/ReconstructionSystem/make_fragments.rst @@ -8,7 +8,7 @@ The first step of the scene reconstruction system is to create fragments from sh Input arguments `````````````````````````````````````` -The script runs with ``python run_system.py [config] --make``. In ``[config]``, ``["path_dataset"]`` should have subfolders ``image`` and ``depth`` to store the color images and depth images respectively. We assume the color images and the depth images are synchronized and registered. In ``[config]``, the optional argument ``["path_intrinsic"]`` specifies the path to a json file that stores the camera intrinsic matrix (See :ref:`reading_camera_intrinsic` for details). If it is not given, the PrimeSense factory setting is used instead. +The script runs with ``python run_system.py [config] --make``. In ``[config]``, ``["path_dataset"]`` should have subfolders ``image`` and ``depth`` to store the color images and depth images respectively. We assume the color images and the depth images are synchronized and registered. 
In ``[config]``, the optional argument ``["path_intrinsic"]`` specifies the path to a json file that stores the camera intrinsic matrix (See :ref:`/tutorial/Basic/rgbd_odometry.ipynb#read-camera-intrinsic` for details). If it is not given, the PrimeSense factory setting is used instead. .. _make_fragments_register_rgbd_image_pairs: @@ -35,7 +35,7 @@ Multiway registration :lines: 5,62-108 :linenos: -This script uses the technique demonstrated in :ref:`multiway_registration`. Function ``make_posegraph_for_fragment`` builds a pose graph for multiway registration of all RGBD images in this sequence. Each graph node represents an RGBD image and its pose which transforms the geometry to the global fragment space. For efficiency, only key frames are used. +This script uses the technique demonstrated in :ref:`/tutorial/Advanced/multiway_registration.ipynb`. Function ``make_posegraph_for_fragment`` builds a pose graph for multiway registration of all RGBD images in this sequence. Each graph node represents an RGBD image and its pose which transforms the geometry to the global fragment space. For efficiency, only key frames are used. Once a pose graph is created, multiway registration is performed by calling function ``optimize_posegraph_for_fragment``. @@ -58,7 +58,7 @@ Make a fragment :lines: 5,111-145 :linenos: -Once the poses are estimates, :ref:`rgbd_integration` is used to reconstruct a colored fragment from each RGBD sequence. +Once the poses are estimates, :ref:`/tutorial/Advanced/rgbd_integration.ipynb` is used to reconstruct a colored fragment from each RGBD sequence. 
Batch processing `````````````````````````````````````` diff --git a/docs/tutorial/ReconstructionSystem/refine_registration.rst b/docs/tutorial/ReconstructionSystem/refine_registration.rst index 028e9601772..50fe36afd11 100644 --- a/docs/tutorial/ReconstructionSystem/refine_registration.rst +++ b/docs/tutorial/ReconstructionSystem/refine_registration.rst @@ -32,7 +32,7 @@ Multiway registration :lines: 5,17-36 :linenos: -This script uses the technique demonstrated in :ref:`multiway_registration`. Function ``update_posegrph_for_refined_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragment and its pose which transforms the geometry to the global space. +This script uses the technique demonstrated in :ref:`/tutorial/Advanced/multiway_registration.ipynb`. Function ``update_posegrph_for_refined_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragment and its pose which transforms the geometry to the global space. Once a pose graph is built, function ``optimize_posegraph_for_scene`` is called for multiway registration. diff --git a/docs/tutorial/ReconstructionSystem/register_fragments.rst b/docs/tutorial/ReconstructionSystem/register_fragments.rst index f4ce71b2b8e..abe1eafe749 100644 --- a/docs/tutorial/ReconstructionSystem/register_fragments.rst +++ b/docs/tutorial/ReconstructionSystem/register_fragments.rst @@ -22,7 +22,7 @@ Preprocess point cloud :lines: 5,18-28 :linenos: -This function downsamples point cloud to make a point cloud sparser and regularly distributed. Normals and FPFH feature are precomputed. See :ref:`voxel_downsampling`, :ref:`vertex_normal_estimation`, and :ref:`extract_geometric_feature` for more details. +This function downsamples point cloud to make a point cloud sparser and regularly distributed. Normals and FPFH feature are precomputed. 
See :ref:`/tutorial/Basic/pointcloud.ipynb#voxel-downsampling`, :ref:`/tutorial/Basic/pointcloud.ipynb#vertex-normal-estimation`, and :ref:`/tutorial/Advanced/global_registration.ipynb#extract-geometric-feature` for more details. Compute initial registration @@ -48,7 +48,7 @@ Pairwise global registration :lines: 5,31-52 :linenos: -This function uses :ref:`feature_matching` or :ref:`fast_global_registration` for pairwise global registration. +This function uses :ref:`/tutorial/Advanced/global_registration.ipynb#RANSAC` or :ref:`/tutorial/Advanced/global_registration.ipynb#fast-global-registration` for pairwise global registration. .. _reconstruction_system_compute_initial_registration: @@ -62,7 +62,7 @@ Multiway registration :lines: 5,84-103 :linenos: -This script uses the technique demonstrated in :ref:`multiway_registration`. Function ``update_posegrph_for_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragment and its pose which transforms the geometry to the global space. +This script uses the technique demonstrated in :ref:`/tutorial/Advanced/multiway_registration.ipynb`. Function ``update_posegrph_for_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragment and its pose which transforms the geometry to the global space. Once a pose graph is built, function ``optimize_posegraph_for_scene`` is called for multiway registration. diff --git a/docs/tutorial/ReconstructionSystem/system_overview.rst b/docs/tutorial/ReconstructionSystem/system_overview.rst index c8a95373b31..df15eec495f 100644 --- a/docs/tutorial/ReconstructionSystem/system_overview.rst +++ b/docs/tutorial/ReconstructionSystem/system_overview.rst @@ -6,14 +6,14 @@ System overview The system has 4 main steps: **Step 1**. :ref:`reconstruction_system_make_fragments`: build local geometric surfaces (referred to as -fragments) from short subsequences of the input RGBD sequence. 
This part uses :ref:`rgbd_odometry`, :ref:`multiway_registration`, and :ref:`rgbd_integration`. +fragments) from short subsequences of the input RGBD sequence. This part uses :ref:`/tutorial/Basic/rgbd_odometry.ipynb`, :ref:`/tutorial/Advanced/multiway_registration.ipynb`, and :ref:`/tutorial/Advanced/rgbd_integration.ipynb`. -**Step 2**. :ref:`reconstruction_system_register_fragments`: the fragments are aligned in a global space to detect loop closure. This part uses :ref:`global_registration`, :ref:`icp_registration`, and :ref:`multiway_registration`. +**Step 2**. :ref:`reconstruction_system_register_fragments`: the fragments are aligned in a global space to detect loop closure. This part uses :ref:`/tutorial/Advanced/global_registration.ipynb`, :ref:`/tutorial/Basic/icp_registration.ipynb`, and :ref:`/tutorial/Advanced/multiway_registration.ipynb`. -**Step 3**. :ref:`reconstruction_system_refine_registration`: the rough alignments are aligned more tightly. This part uses :ref:`icp_registration`, and :ref:`multiway_registration`. +**Step 3**. :ref:`reconstruction_system_refine_registration`: the rough alignments are aligned more tightly. This part uses :ref:`/tutorial/Basic/icp_registration.ipynb`, and :ref:`/tutorial/Advanced/multiway_registration.ipynb`. **Step 4**. :ref:`reconstruction_system_integrate_scene`: integrate RGB-D images to generate a mesh model for -the scene. This part uses :ref:`rgbd_integration`. +the scene. This part uses :ref:`/tutorial/Advanced/rgbd_integration.ipynb`. .. _reconstruction_system_dataset: @@ -44,7 +44,7 @@ Put all color images in the ``image`` folder, and all depth images in the ``dept :lines: 1- :linenos: -We assume that the color images and the depth images are synchronized and registered. ``"path_intrinsic"`` specifies path to a json file that stores the camera intrinsic matrix (See :ref:`reading_camera_intrinsic` for details). If it is not given, the PrimeSense factory setting is used. 
For your own dataset, use an appropriate camera intrinsic and visualize a depth image (likewise :ref:`rgbd_redwood`) prior to use the system. +We assume that the color images and the depth images are synchronized and registered. ``"path_intrinsic"`` specifies path to a json file that stores the camera intrinsic matrix (See :ref:`/tutorial/Basic/rgbd_odometry.ipynb#read-camera-intrinsic` for details). If it is not given, the PrimeSense factory setting is used. For your own dataset, use an appropriate camera intrinsic and visualize a depth image (likewise :ref:`/tutorial/Basic/rgbd_image.ipynb`) prior to use the system. .. note:: ``"python_multi_threading": true`` utilizes ``joblib`` to parallelize the system using every CPU cores. With this option, Mac users may encounter an unexpected program termination. To avoid this issue, set this flag as ``false``. diff --git a/docs/tutorial/reference.rst b/docs/tutorial/reference.rst index af574bc6f08..69fea1a400a 100644 --- a/docs/tutorial/reference.rst +++ b/docs/tutorial/reference.rst @@ -3,10 +3,16 @@ Reference ------------------------------------- +.. [Bernardini1999] F. Bernardini and J. Mittleman and HRushmeier and C. Silva and G. Taubin: The ball-pivoting algorithm for surface reconstruction, IEEE transactions on visualization and computer graphics, 5(4), 349-359, 1999 .. [BeslAndMcKay1992] Paul J. Besl and Neil D. McKay, A Method for Registration of 3D Shapes, PAMI, 1992. .. [ChenAndMedioni1992] Y. Chen and G. G. Medioni, Object modelling by registration of multiple range images, Image and Vision Computing, 10(3), 1992. .. [Choi2015] S. Choi, Q.-Y. Zhou, and V. Koltun, Robust Reconstruction of Indoor Scenes, CVPR, 2015. .. [Curless1996] B. Curless and M. Levoy. A volumetric method for building complex models from range images. In SIGGRAPH, 1996. +.. [Edelsbrunner1983] H. Edelsbrunner and D. G. Kirkpatrick and R. 
Seidel: On the shape of a set of points in the plane, IEEE Transactions on Information Theory, 29 (4): 551–559, 1983 +.. [Ester1996] M. Ester and H.-P. Kriegel and J Sander and X. Xu, A density-based algorithm for discovering clusters in large spatial databases with noise, KDD, 1996. +.. [Katz2007] S. Katz and A. Tal and R. Basri, Direct visibility of point sets, SIGGRAPH, 2007. +.. [Kazhdan2006] M. Kazhdan and M. Bolitho and H. Hoppe: Poisson surface reconstruction, Eurographics, 2006. +.. [Loop1987] C. Loop: Smooth Subdivision Surfaces Based on Triangles, M.S. Mathematics thesis, University of Utah, 1987 .. [LorensenAndCline1987] W. E. Lorensen and H. E. Cline, Marching cubes: A high resolution 3d surface construction algorithm, ACM Computer Graphics, 1987 .. [Newcombe2011] R. A. Newcombe, S. Izadi, O. Hilliges, D. Molyneaux, D. Kim, A. J. Davison, P. Kohli, J. Shotton, S. Hodges, and A. Fitzgibbon. KinectFusion: Real-time dense surface mapping and tracking. In ISMAR, 2011. .. [Park2017] J. Park, Q.-Y. Zhou, and V. Koltun, Colored Point Cloud Registration Revisited, ICCV, 2017. @@ -14,7 +20,9 @@ Reference .. [Rusinkiewicz2001] S. Rusinkiewicz and M. Levoy. Efficient variants of the ICP algorithm. In 3-D Digital Imaging and Modeling, 2001. .. [Silberman2012] N. Silberman, D. Hoiem, P. Kohli and R. Fergus, Indoor Segmentation and Support Inference from RGBD Images, ECCV, 2012. .. [Song2015] S. Song, S. Lichtenberg, and J. Xiao, SUN RGB-D: A RGB-D Scene Understanding Benchmark Suite, CVPR, 2015. +.. [SorkineAndAlexa2007] O. Sorkine and M. Alexa, As-rigid-as-possible surface modeling, Symposium on Geometry processing, 2007. .. [Steinbrucker2011] F. Steinbrucker, J. Sturm, and D. Cremers, Real-time visual odometry from dense RGB-D images, In ICCV Workshops, 2011. .. [Strum2012] J. Sturm, N. Engelhard, F. Endres, W. Burgard and D. Cremers, A Benchmark for the Evaluation of RGB-D SLAM Systems, IROS, 2012. +.. [Taubin1995] G. 
Taubin: Curve and surface smoothing without shrinkage, ICCV, 1995. .. [Zhou2014] Q.-Y. Zhou, and V. Koltun, Color Map Optimization for 3D Reconstruction with Consumer Depth Cameras, SIGGRAPH, 2014. .. [Zhou2016] Q.-Y. Zhou, J. Park, and V. Koltun, Fast Global Registration, ECCV, 2016. diff --git a/examples/Cpp/Log.cpp b/examples/Cpp/Log.cpp index 88baca9379b..29a1b75da86 100644 --- a/examples/Cpp/Log.cpp +++ b/examples/Cpp/Log.cpp @@ -40,13 +40,6 @@ int main(int argc, char **argv) { utility::LogWarning("This Warning message should be visible, {} {:.2f}", "format:", 0.42001); - utility::LogDebugf("This Debug message should be visible, %s %.2f", - "formatf:", 0.42001); - utility::LogInfof("This Info message should be visible, %s %.2f", - "formatf:", 0.42001); - utility::LogWarningf("This Warning message should be visible, %s %.2f", - "formatf:", 0.42001); - utility::SetVerbosityLevel(utility::VerbosityLevel::Info); utility::LogDebug("This Debug message should NOT be visible, {} {:.2f}", diff --git a/examples/Python/Advanced/color_map_optimization.ipynb b/examples/Python/Advanced/color_map_optimization.ipynb new file mode 100644 index 00000000000..7da21e86c32 --- /dev/null +++ b/examples/Python/Advanced/color_map_optimization.ipynb @@ -0,0 +1,251 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import re\n", + "import os\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False\n", + "# if running on Travis CI, the number of iterations is reduced\n", + "is_travis = \"TRAVIS\" in os.environ" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Color Map 
Optimization\n", + "Consider color mapping to the geometry reconstructed from depth cameras. As color and depth frames are not perfectly aligned, the texture mapping using color images is subject to results in blurred color map. Open3D provides color map optimization method proposed by [\\[Zhou2014\\]](../reference.html#zhou2014). The following script shows an example of color map optimization." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Input\n", + "This code below reads color and depth image pairs and makes `rgbd_image`. Note that `convert_rgb_to_intensity` flag is `False`. This is to preserve 8-bit color channels instead of using single channel float type image.\n", + "\n", + "It is always good practice to visualize RGBD image before applying it to color map optimization. `debug_mode` switch is for visualizing RGBD image." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def sorted_alphanum(file_list_ordered):\n", + " convert = lambda text: int(text) if text.isdigit() else text\n", + " alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n", + " return sorted(file_list_ordered, key=alphanum_key)\n", + "\n", + "def get_file_list(path, extension=None):\n", + " if extension is None:\n", + " file_list = [path + f for f in os.listdir(path) if os.path.isfile(join(path, f))]\n", + " else:\n", + " file_list = [\n", + " path + f\n", + " for f in os.listdir(path)\n", + " if os.path.isfile(os.path.join(path, f)) and os.path.splitext(f)[1] == extension\n", + " ]\n", + " file_list = sorted_alphanum(file_list)\n", + " return file_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "path = o3dtut.download_fountain_dataset()\n", + "debug_mode = False\n", + "\n", + "rgbd_images = []\n", + "depth_image_path = get_file_list(os.path.join(path, \"depth/\"),\n", + " extension=\".png\")\n", + 
"color_image_path = get_file_list(os.path.join(path, \"image/\"),\n", + " extension=\".jpg\")\n", + "assert (len(depth_image_path) == len(color_image_path))\n", + "for i in range(len(depth_image_path)):\n", + " depth = o3d.io.read_image(os.path.join(depth_image_path[i]))\n", + " color = o3d.io.read_image(os.path.join(color_image_path[i]))\n", + " rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", + " color, depth, convert_rgb_to_intensity=False)\n", + " if debug_mode:\n", + " pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " rgbd_image,\n", + " o3d.camera.PinholeCameraIntrinsic(\n", + " o3d.camera.PinholeCameraIntrinsicParameters.\n", + " PrimeSenseDefault))\n", + " o3d.visualization.draw_geometries([pcd])\n", + " rgbd_images.append(rgbd_image)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The code below reads camera trajectory and mesh." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "camera = o3d.io.read_pinhole_camera_trajectory(\n", + " os.path.join(path, \"scene/key.log\"))\n", + "mesh = o3d.io.read_triangle_mesh(\n", + " os.path.join(path, \"scene\", \"integrated.ply\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To visualize how the camera poses are not good for color mapping, this code intentionally set the iteration number as 0, which means no optimization. `color_map_optimization` paints a mesh using corresponding RGBD images and camera poses. Without optimization, the texture map is blurred." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Before full optimization, let's just visualize texture map\n", + "# with given geometry, RGBD images, and camera poses.\n", + "option = o3d.color_map.ColorMapOptimizationOption()\n", + "option.maximum_iteration = 0\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "o3d.visualization.draw_geometries([mesh], zoom=0.5399,\n", + " front=[0.0665, -0.1107, -0.9916],\n", + " lookat=[0.7353, 0.6537, 1.0521],\n", + " up=[0.0136, -0.9936, 0.1118])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Rigid Optimization\n", + "The next step is to optimize camera poses to get a sharp color map.\n", + "\n", + "The code below sets `maximum_iteration = 300` for actual iterations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Optimize texture and save the mesh as texture_mapped.ply\n", + "# This is implementation of following paper\n", + "# Q.-Y. Zhou and V. 
Koltun,\n", + "# Color Map Optimization for 3D Reconstruction with Consumer Depth Cameras,\n", + "# SIGGRAPH 2014\n", + "option.maximum_iteration = 10 if is_travis else 300\n", + "option.non_rigid_camera_coordinate = False\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "o3d.visualization.draw_geometries([mesh], zoom=0.5399,\n", + " front=[0.0665, -0.1107, -0.9916],\n", + " lookat=[0.7353, 0.6537, 1.0521],\n", + " up=[0.0136, -0.9936, 0.1118])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The residual error implies inconsistency of image intensities. Lower residual leads better color map quality. By default, `ColorMapOptimizationOption` enables rigid optimization. It optimizes 6-dimentional pose of every cameras." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Non-rigid Optimization\n", + "For better alignment quality, there is an option for non-rigid optimization. To enable, simply add change `option.non_rigid_camera_coordinate` to `True` before calling `color_map_optimization`. Besides 6-dimentional camera poses, non-rigid optimization even consider local image warping represented by anchor points. This adds even more flexibility and leads higher quality color mapping. The residual error is smaller than the case of rigid optimization." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "option.maximum_iteration = 10 if is_travis else 300\n", + "option.non_rigid_camera_coordinate = True\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "o3d.visualization.draw_geometries([mesh], zoom=0.5399,\n", + " front=[0.0665, -0.1107, -0.9916],\n", + " lookat=[0.7353, 0.6537, 1.0521],\n", + " up=[0.0136, -0.9936, 0.1118])" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Advanced/color_map_optimization.py b/examples/Python/Advanced/color_map_optimization.py deleted file mode 100644 index 4ec05ddb151..00000000000 --- a/examples/Python/Advanced/color_map_optimization.py +++ /dev/null @@ -1,65 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/color_map_optimization.py - -import open3d as o3d -from trajectory_io import * -import os, sys -sys.path.append("../Utility") -from file import * - -path = "[path_to_fountain_dataset]" -debug_mode = False - -if __name__ == "__main__": - o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) - - # Read RGBD images - rgbd_images = [] - depth_image_path = get_file_list(os.path.join(path, "depth/"), - extension=".png") - color_image_path = 
get_file_list(os.path.join(path, "image/"), - extension=".jpg") - assert (len(depth_image_path) == len(color_image_path)) - for i in range(len(depth_image_path)): - depth = o3d.io.read_image(os.path.join(depth_image_path[i])) - color = o3d.io.read_image(os.path.join(color_image_path[i])) - rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth( - color, depth, convert_rgb_to_intensity=False) - if debug_mode: - pcd = o3d.geometry.PointCloud.create_from_rgbd_image( - rgbd_image, - o3d.camera.PinholeCameraIntrinsic( - o3d.camera.PinholeCameraIntrinsicParameters. - PrimeSenseDefault)) - o3d.visualization.draw_geometries([pcd]) - rgbd_images.append(rgbd_image) - - # Read camera pose and mesh - camera = o3d.io.read_pinhole_camera_trajectory( - os.path.join(path, "scene/key.log")) - mesh = o3d.io.read_triangle_mesh( - os.path.join(path, "scene", "integrated.ply")) - - # Before full optimization, let's just visualize texture map - # with given geometry, RGBD images, and camera poses. - option = o3d.color_map.ColorMapOptimizationOption() - option.maximum_iteration = 0 - o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option) - o3d.visualization.draw_geometries([mesh]) - o3d.io.write_triangle_mesh( - os.path.join(path, "scene", "color_map_before_optimization.ply"), mesh) - - # Optimize texture and save the mesh as texture_mapped.ply - # This is implementation of following paper - # Q.-Y. Zhou and V. 
Koltun, - # Color Map Optimization for 3D Reconstruction with Consumer Depth Cameras, - # SIGGRAPH 2014 - option.maximum_iteration = 300 - option.non_rigid_camera_coordinate = True - o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option) - o3d.visualization.draw_geometries([mesh]) - o3d.io.write_triangle_mesh( - os.path.join(path, "scene", "color_map_after_optimization.ply"), mesh) diff --git a/examples/Python/Advanced/colored_pointcloud_registration.ipynb b/examples/Python/Advanced/colored_pointcloud_registration.ipynb new file mode 100644 index 00000000000..aae91948782 --- /dev/null +++ b/examples/Python/Advanced/colored_pointcloud_registration.ipynb @@ -0,0 +1,206 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import copy\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Colored point cloud registration\n", + "This tutorial demonstrates an ICP variant that uses both geometry and color for registration. It implements the algorithm of [\\[Park2017\\]](../reference.html#park2017). The color information locks the alignment along the tangent plane. Thus this algorithm is more accurate and more robust than prior point cloud registration algorithms, while the running speed is comparable to that of ICP registration. This tutorial uses notations from [ICP registration](../Basic/icp_registration.ipynb)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Helper visualization function\n", + "In order to demonstrate the alignment between colored point clouds, `draw_registration_result_original_color` renders point clouds with their original color." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def draw_registration_result_original_color(source, target, transformation):\n", + " source_temp = copy.deepcopy(source)\n", + " source_temp.transform(transformation)\n", + " o3d.visualization.draw_geometries([source_temp, target], zoom=0.5,\n", + " front=[-0.2458, -0.8088, 0.5342],\n", + " lookat=[1.7745, 2.2305, 0.9787],\n", + " up=[0.3109, -0.5878, -0.7468])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Input\n", + "This code below reads a source point cloud and a target point cloud from two files. An identity matrix is used as initialization for the registration." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"1. Load two point clouds and show initial pose\")\n", + "source = o3d.io.read_point_cloud(\"../../TestData/ColoredICP/frag_115.ply\")\n", + "target = o3d.io.read_point_cloud(\"../../TestData/ColoredICP/frag_116.ply\")\n", + "\n", + "# draw initial alignment\n", + "current_transformation = np.identity(4)\n", + "draw_registration_result_original_color(source, target, current_transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Point-to-plane ICP\n", + "We first run [Point-to-plane ICP](../Basic/icp_registration.ipynb#Point-to-plane-ICP) as a baseline approach. The visualization below shows misaligned green triangle textures. This is because geometric constraint does not prevent two planar surfaces from slipping." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# point to plane ICP\n", + "current_transformation = np.identity(4)\n", + "print(\"2. Point-to-plane ICP registration is applied on original point\")\n", + "print(\" clouds to refine the alignment. Distance threshold 0.02.\")\n", + "result_icp = o3d.registration.registration_icp(\n", + " source, target, 0.02, current_transformation,\n", + " o3d.registration.TransformationEstimationPointToPlane())\n", + "print(result_icp)\n", + "draw_registration_result_original_color(source, target, result_icp.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Colored point cloud registration\n", + "The core function for colored point cloud registration is `registration_colored_icp`. Following [\\[Park2017\\]](../reference.html#park2017), it runs ICP iterations (see [Point-to-point ICP](../Basic/icp_registration.ipynb#Point-to-point-ICP) for details) with a joint optimization objective\n", + "\n", + "\\begin{equation}\n", + "E(\\mathbf{T}) = (1-\\delta)E_{C}(\\mathbf{T}) + \\delta E_{G}(\\mathbf{T})\n", + "\\end{equation}\n", + "\n", + "where $\\mathbf{T}$ is the transformation matrix to be estimated. $E_{C}$ and $E_{G}$ are the photometric and geometric terms, respectively. $\\delta\\in[0,1]$ is a weight parameter that has been determined empirically.\n", + "\n", + "The geometric term $E_{G}$ is the same as the [Point-to-plane ICP](../Basic/icp_registration.ipynb#Point-to-plane-ICP) objective\n", + "\n", + "\\begin{equation}\n", + "E_{G}(\\mathbf{T}) = \\sum_{(\\mathbf{p},\\mathbf{q})\\in\\mathcal{K}}\\big((\\mathbf{p} - \\mathbf{T}\\mathbf{q})\\cdot\\mathbf{n}_{\\mathbf{p}}\\big)^{2},\n", + "\\end{equation}\n", + "\n", + "where $\\mathcal{K}$ is the correspondence set in the current iteration. 
$\\mathbf{n}_{\\mathbf{p}}$ is the normal of point $\\mathbf{p}$.\n", + "\n", + "The color term $E_{C}$ measures the difference between the color of point $\\mathbf{q}$ (denoted as $C(\\mathbf{q})$) and the color of its projection on the tangent plane of $\\mathbf{p}$.\n", + "\n", + "\\begin{equation}\n", + "E_{C}(\\mathbf{T}) = \\sum_{(\\mathbf{p},\\mathbf{q})\\in\\mathcal{K}}\\big(C_{\\mathbf{p}}(\\mathbf{f}(\\mathbf{T}\\mathbf{q})) - C(\\mathbf{q})\\big)^{2},\n", + "\\end{equation}\n", + "\n", + "where $C_{\\mathbf{p}}(\\cdot)$ is a precomputed function continuously defined on the tangent plane of $\\mathbf{p}$. Function$\\mathbf{f}(\\cdot)$ projects a 3D point to the tangent plane. More details refer to [\\[Park2017\\]](../reference.html#park2017).\n", + "\n", + "To further improve efficiency, [\\[Park2017\\]](../reference.html#park2017) proposes a multi-scale registration scheme. This has been implemented in the following script." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# colored pointcloud registration\n", + "# This is implementation of following paper\n", + "# J. Park, Q.-Y. Zhou, V. Koltun,\n", + "# Colored Point Cloud Registration Revisited, ICCV 2017\n", + "voxel_radius = [0.04, 0.02, 0.01]\n", + "max_iter = [50, 30, 14]\n", + "current_transformation = np.identity(4)\n", + "print(\"3. Colored point cloud registration\")\n", + "for scale in range(3):\n", + " iter = max_iter[scale]\n", + " radius = voxel_radius[scale]\n", + " print([iter, radius, scale])\n", + "\n", + " print(\"3-1. Downsample with a voxel size %.2f\" % radius)\n", + " source_down = source.voxel_down_sample(radius)\n", + " target_down = target.voxel_down_sample(radius)\n", + "\n", + " print(\"3-2. 
Estimate normal.\")\n", + " source_down.estimate_normals(\n", + " o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))\n", + " target_down.estimate_normals(\n", + " o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))\n", + "\n", + " print(\"3-3. Applying colored point cloud registration\")\n", + " result_icp = o3d.registration.registration_colored_icp(\n", + " source_down, target_down, radius, current_transformation,\n", + " o3d.registration.ICPConvergenceCriteria(relative_fitness=1e-6,\n", + " relative_rmse=1e-6,\n", + " max_iteration=iter))\n", + " current_transformation = result_icp.transformation\n", + " print(result_icp)\n", + "draw_registration_result_original_color(source, target, result_icp.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In total, 3 layers of multi-resolution point clouds are created with `voxel_down_sample`. Normals are computed with Vertex normal estimation. The core registration function `registration_colored_icp` is called for each layer, from coarse to fine. `lambda_geometric` is an optional argument for `registration_colored_icp` that determines $\\lambda \\in [0, 1]$ in the overall energy $\\lambda E_{G} + (1-\\lambda) E_{C}$.\n", + "\n", + "The output is a tight alignment of the two point clouds. Notice the green triangles on the wall." 
+ ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Advanced/colored_pointcloud_registration.py b/examples/Python/Advanced/colored_pointcloud_registration.py deleted file mode 100644 index 75c19fcc725..00000000000 --- a/examples/Python/Advanced/colored_pointcloud_registration.py +++ /dev/null @@ -1,72 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/colored_pointcloud_registration.py - -import numpy as np -import copy -import open3d as o3d - - -def draw_registration_result_original_color(source, target, transformation): - source_temp = copy.deepcopy(source) - source_temp.transform(transformation) - o3d.visualization.draw_geometries([source_temp, target]) - - -if __name__ == "__main__": - - print("1. Load two point clouds and show initial pose") - source = o3d.io.read_point_cloud("../../TestData/ColoredICP/frag_115.ply") - target = o3d.io.read_point_cloud("../../TestData/ColoredICP/frag_116.ply") - - # draw initial alignment - current_transformation = np.identity(4) - draw_registration_result_original_color(source, target, - current_transformation) - - # point to plane ICP - current_transformation = np.identity(4) - print("2. Point-to-plane ICP registration is applied on original point") - print(" clouds to refine the alignment. 
Distance threshold 0.02.") - result_icp = o3d.registration.registration_icp( - source, target, 0.02, current_transformation, - o3d.registration.TransformationEstimationPointToPlane()) - print(result_icp) - draw_registration_result_original_color(source, target, - result_icp.transformation) - - # colored pointcloud registration - # This is implementation of following paper - # J. Park, Q.-Y. Zhou, V. Koltun, - # Colored Point Cloud Registration Revisited, ICCV 2017 - voxel_radius = [0.04, 0.02, 0.01] - max_iter = [50, 30, 14] - current_transformation = np.identity(4) - print("3. Colored point cloud registration") - for scale in range(3): - iter = max_iter[scale] - radius = voxel_radius[scale] - print([iter, radius, scale]) - - print("3-1. Downsample with a voxel size %.2f" % radius) - source_down = source.voxel_down_sample(radius) - target_down = target.voxel_down_sample(radius) - - print("3-2. Estimate normal.") - source_down.estimate_normals( - o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30)) - target_down.estimate_normals( - o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30)) - - print("3-3. 
Applying colored point cloud registration") - result_icp = o3d.registration.registration_colored_icp( - source_down, target_down, radius, current_transformation, - o3d.registration.ICPConvergenceCriteria(relative_fitness=1e-6, - relative_rmse=1e-6, - max_iteration=iter)) - current_transformation = result_icp.transformation - print(result_icp) - draw_registration_result_original_color(source, target, - result_icp.transformation) diff --git a/examples/Python/Advanced/downsampling_and_trace.py b/examples/Python/Advanced/downsampling_and_trace.py deleted file mode 100644 index 62714770b37..00000000000 --- a/examples/Python/Advanced/downsampling_and_trace.py +++ /dev/null @@ -1,34 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/downsampling_and_trace.py - -import numpy as np -import open3d as o3d - -if __name__ == "__main__": - - pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") - min_cube_size = 0.05 - print("\nOriginal, # of points %d" % (np.asarray(pcd.points).shape[0])) - pcd_down = pcd.voxel_down_sample(min_cube_size) - print("\nScale %f, # of points %d" % \ - (min_cube_size, np.asarray(pcd_down.points).shape[0])) - min_bound = pcd_down.get_min_bound() - min_cube_size * 0.5 - max_bound = pcd_down.get_max_bound() + min_cube_size * 0.5 - - pcd_curr = pcd_down - num_scales = 3 - for i in range(1, num_scales): - multiplier = pow(2, i) - pcd_curr_down, cubic_id = pcd_curr.voxel_down_sample_and_trace( - multiplier * min_cube_size, min_bound, max_bound, False) - print("\nScale %f, # of points %d" % - (multiplier * min_cube_size, np.asarray( - pcd_curr_down.points).shape[0])) - print("Downsampled points (the first 10 points)") - print(np.asarray(pcd_curr_down.points)[:10, :]) - print("Index (the first 10 indices)") - print(np.asarray(cubic_id)[:10, :]) - pcd_curr = pcd_curr_down diff --git a/examples/Python/Advanced/fast_global_registration.py 
b/examples/Python/Advanced/fast_global_registration.py deleted file mode 100644 index 62d14045bdd..00000000000 --- a/examples/Python/Advanced/fast_global_registration.py +++ /dev/null @@ -1,49 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/fast_global_registration.py - -import open3d as o3d -from global_registration import * -import numpy as np -import copy - -import time - - -def execute_fast_global_registration(source_down, target_down, source_fpfh, - target_fpfh, voxel_size): - distance_threshold = voxel_size * 0.5 - print(":: Apply fast global registration with distance threshold %.3f" \ - % distance_threshold) - result = o3d.registration.registration_fast_based_on_feature_matching( - source_down, target_down, source_fpfh, target_fpfh, - o3d.registration.FastGlobalRegistrationOption( - maximum_correspondence_distance=distance_threshold)) - return result - - -if __name__ == "__main__": - - voxel_size = 0.05 # means 5cm for the dataset - source, target, source_down, target_down, source_fpfh, target_fpfh = \ - prepare_dataset(voxel_size) - - start = time.time() - result_ransac = execute_global_registration(source_down, target_down, - source_fpfh, target_fpfh, - voxel_size) - print("Global registration took %.3f sec.\n" % (time.time() - start)) - print(result_ransac) - draw_registration_result(source_down, target_down, - result_ransac.transformation) - - start = time.time() - result_fast = execute_fast_global_registration(source_down, target_down, - source_fpfh, target_fpfh, - voxel_size) - print("Fast global registration took %.3f sec.\n" % (time.time() - start)) - print(result_fast) - draw_registration_result(source_down, target_down, - result_fast.transformation) diff --git a/examples/Python/Advanced/global_registration.ipynb b/examples/Python/Advanced/global_registration.ipynb new file mode 100644 index 00000000000..b74adc02a2b --- /dev/null +++ 
b/examples/Python/Advanced/global_registration.ipynb @@ -0,0 +1,356 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import copy\n", + "import time\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Global registration\n", + "Both [ICP registration](../Basic/icp_registration.ipynb) and [Colored point cloud registration](colored_pointcloud_registration.ipynb) are known as local registration methods because they rely on a rough alignment as initialization. This tutorial shows another class of registration methods, known as **global** registration. This family of algorithms do not require an alignment for initialization. They usually produce less tight alignment results and are used as initialization of the local methods." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def draw_registration_result(source, target, transformation):\n", + " source_temp = copy.deepcopy(source)\n", + " target_temp = copy.deepcopy(target)\n", + " source_temp.paint_uniform_color([1, 0.706, 0])\n", + " target_temp.paint_uniform_color([0, 0.651, 0.929])\n", + " source_temp.transform(transformation)\n", + " o3d.visualization.draw_geometries([source_temp, target_temp],\n", + " zoom=0.4559,\n", + " front=[0.6452, -0.3036, -0.7011],\n", + " lookat=[1.9892, 2.0208, 1.8945],\n", + " up=[-0.2779, -0.9482 ,0.1556])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extract geometric feature\n", + "We down sample the point cloud, estimate normals, then compute a FPFH feature for each point. The FPFH feature is a 33-dimensional vector that describes the local geometric property of a point. A nearest neighbor query in the 33-dimensional space can return points with similar local geometric structures. See [\\[Rasu2009\\]](../reference.html#rasu2009) for details." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess_point_cloud(pcd, voxel_size):\n", + " print(\":: Downsample with a voxel size %.3f.\" % voxel_size)\n", + " pcd_down = pcd.voxel_down_sample(voxel_size)\n", + "\n", + " radius_normal = voxel_size * 2\n", + " print(\":: Estimate normal with search radius %.3f.\" % radius_normal)\n", + " pcd_down.estimate_normals(\n", + " o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))\n", + "\n", + " radius_feature = voxel_size * 5\n", + " print(\":: Compute FPFH feature with search radius %.3f.\" % radius_feature)\n", + " pcd_fpfh = o3d.registration.compute_fpfh_feature(\n", + " pcd_down,\n", + " o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))\n", + " return pcd_down, pcd_fpfh" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Input\n", + "This code below reads a source point cloud and a target point cloud from two files. They are misaligned with an identity matrix as transformation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def prepare_dataset(voxel_size):\n", + " print(\":: Load two point clouds and disturb initial pose.\")\n", + " source = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_0.pcd\")\n", + " target = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_1.pcd\")\n", + " trans_init = np.asarray([[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0],\n", + " [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])\n", + " source.transform(trans_init)\n", + " draw_registration_result(source, target, np.identity(4))\n", + "\n", + " source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)\n", + " target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)\n", + " return source, target, source_down, target_down, source_fpfh, target_fpfh" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "voxel_size = 0.05 # means 5cm for this dataset\n", + "source, target, source_down, target_down, source_fpfh, target_fpfh = prepare_dataset(voxel_size)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## RANSAC\n", + "We use RANSAC for global registration. In each RANSAC iteration, `ransac_n` random points are picked from the source point cloud. Their corresponding points in the target point cloud are detected by querying the nearest neighbor in the 33-dimensional FPFH feature space. A pruning step takes fast pruning algorithms to quickly reject false matches early.\n", + "\n", + "Open3D provides the following pruning algorithms:\n", + "\n", + "- `CorrespondenceCheckerBasedOnDistance` checks if aligned point clouds are close (less than specified threshold).\n", + "- `CorrespondenceCheckerBasedOnEdgeLength` checks if the lengths of any two arbitrary edges (line formed by two vertices) individually drawn from source and target correspondences are similar. 
This tutorial checks that $||edge_{source}|| > 0.9 \\times ||edge_{target}||$ and $||edge_{target}|| > 0.9 \\times ||edge_{source}||$ are true.\n", + "- `CorrespondenceCheckerBasedOnNormal` considers vertex normal affinity of any correspondences. It computes dot product of two normal vectors. It takes radian value for the threshold.\n", + "\n", + "Only matches that pass the pruning step are used to compute a transformation, which is validated on the entire point cloud. The core function is `registration_ransac_based_on_feature_matching`. The most important hyperparameter of this function is `RANSACConvergenceCriteria`. It defines the maximum number of RANSAC iterations and the maximum number of validation steps. The larger these two numbers are, the more accurate the result is, but also the more time the algorithm takes.\n", + "\n", + "We set the RANSAC parameters based on the empirical value provided by [\\[Choi2015\\]](../reference.html#choi2015). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def execute_global_registration(source_down, target_down, source_fpfh,\n", + " target_fpfh, voxel_size):\n", + " distance_threshold = voxel_size * 1.5\n", + " print(\":: RANSAC registration on downsampled point clouds.\")\n", + " print(\" Since the downsampling voxel size is %.3f,\" % voxel_size)\n", + " print(\" we use a liberal distance threshold %.3f.\" % distance_threshold)\n", + " result = o3d.registration.registration_ransac_based_on_feature_matching(\n", + " source_down, target_down, source_fpfh, target_fpfh, distance_threshold,\n", + " o3d.registration.TransformationEstimationPointToPoint(False), 4, [\n", + " o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),\n", + " o3d.registration.CorrespondenceCheckerBasedOnDistance(\n", + " distance_threshold)\n", + " ], o3d.registration.RANSACConvergenceCriteria(4000000, 500))\n", + " return result" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result_ransac = execute_global_registration(source_down, target_down,\n", + " source_fpfh, target_fpfh,\n", + " voxel_size)\n", + "print(result_ransac)\n", + "draw_registration_result(source_down, target_down, result_ransac.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:**\n", + "\n", + "Open3D provides faster implementation for global registration. Please refer to [Fast global registration](#fast-global-registration).\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Local refinement\n", + "For performance reason, the global registration is only performed on a heavily down-sampled point cloud. The result is also not tight. We use [Point-to-plane ICP](../icp_registration.ipynb#point-to-plane-ICP) to further refine the alignment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size):\n", + " distance_threshold = voxel_size * 0.4\n", + " print(\":: Point-to-plane ICP registration is applied on original point\")\n", + " print(\" clouds to refine the alignment. This time we use a strict\")\n", + " print(\" distance threshold %.3f.\" % distance_threshold)\n", + " result = o3d.registration.registration_icp(\n", + " source, target, distance_threshold, result_ransac.transformation,\n", + " o3d.registration.TransformationEstimationPointToPlane())\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result_icp = refine_registration(source, target, source_fpfh, target_fpfh,\n", + " voxel_size)\n", + "print(result_icp)\n", + "draw_registration_result(source, target, result_icp.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fast global registration\n", + "The RANSAC based global registration solution may take a long time due to countless model proposals and evaluations. [\\[Zhou2016\\]](../reference.html#zhou2016) introduced a faster approach that quickly optimizes line process weights of few correspondences. 
As there is no model proposal and evaluation involved for each iteration, the approach proposed in [\\[Zhou2016\\]](../reference.html#zhou2016) can save a lot of computational time.\n", + "\n", + "This tutorial compares the running time of the RANSAC based global registration to the implementation of [\\[Zhou2016\\]](../reference.html#zhou2016)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Input\n", + "We use the same input as in the global registration example above." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "voxel_size = 0.05 # means 5cm for the dataset\n", + "source, target, source_down, target_down, source_fpfh, target_fpfh = \\\n", + " prepare_dataset(voxel_size)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Baseline\n", + "In the code below we time the global registration approach." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "start = time.time()\n", + "result_ransac = execute_global_registration(source_down, target_down,\n", + " source_fpfh, target_fpfh,\n", + " voxel_size)\n", + "print(\"Global registration took %.3f sec.\\n\" % (time.time() - start))\n", + "print(result_ransac)\n", + "draw_registration_result(source_down, target_down,\n", + " result_ransac.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Fast global registration\n", + "With the same input used for a baseline, the next code below calls the implementation of [\\[Zhou2016\\]](../reference.html#zhou2016)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def execute_fast_global_registration(source_down, target_down, source_fpfh,\n", + " target_fpfh, voxel_size):\n", + " distance_threshold = voxel_size * 0.5\n", + " print(\":: Apply fast global registration with distance threshold %.3f\" \\\n", + " % distance_threshold)\n", + " result = o3d.registration.registration_fast_based_on_feature_matching(\n", + " source_down, target_down, source_fpfh, target_fpfh,\n", + " o3d.registration.FastGlobalRegistrationOption(\n", + " maximum_correspondence_distance=distance_threshold))\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "start = time.time()\n", + "result_fast = execute_fast_global_registration(source_down, target_down,\n", + " source_fpfh, target_fpfh,\n", + " voxel_size)\n", + "print(\"Fast global registration took %.3f sec.\\n\" % (time.time() - start))\n", + "print(result_fast)\n", + "draw_registration_result(source_down, target_down,\n", + " result_fast.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With proper configuration, the accuracy of fast global registration is even comparable with ICP. Please refer to [\\[Zhou2016\\]](../reference.html#zhou2016) for more experimental results." 
+ ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Advanced/global_registration.py b/examples/Python/Advanced/global_registration.py deleted file mode 100644 index d57ab7ad81c..00000000000 --- a/examples/Python/Advanced/global_registration.py +++ /dev/null @@ -1,94 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/global_registration.py - -import open3d as o3d -import numpy as np -import copy - - -def draw_registration_result(source, target, transformation): - source_temp = copy.deepcopy(source) - target_temp = copy.deepcopy(target) - source_temp.paint_uniform_color([1, 0.706, 0]) - target_temp.paint_uniform_color([0, 0.651, 0.929]) - source_temp.transform(transformation) - o3d.visualization.draw_geometries([source_temp, target_temp]) - - -def preprocess_point_cloud(pcd, voxel_size): - print(":: Downsample with a voxel size %.3f." % voxel_size) - pcd_down = pcd.voxel_down_sample(voxel_size) - - radius_normal = voxel_size * 2 - print(":: Estimate normal with search radius %.3f." % radius_normal) - pcd_down.estimate_normals( - o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)) - - radius_feature = voxel_size * 5 - print(":: Compute FPFH feature with search radius %.3f." 
% radius_feature) - pcd_fpfh = o3d.registration.compute_fpfh_feature( - pcd_down, - o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100)) - return pcd_down, pcd_fpfh - - -def prepare_dataset(voxel_size): - print(":: Load two point clouds and disturb initial pose.") - source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") - trans_init = np.asarray([[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) - source.transform(trans_init) - draw_registration_result(source, target, np.identity(4)) - - source_down, source_fpfh = preprocess_point_cloud(source, voxel_size) - target_down, target_fpfh = preprocess_point_cloud(target, voxel_size) - return source, target, source_down, target_down, source_fpfh, target_fpfh - - -def execute_global_registration(source_down, target_down, source_fpfh, - target_fpfh, voxel_size): - distance_threshold = voxel_size * 1.5 - print(":: RANSAC registration on downsampled point clouds.") - print(" Since the downsampling voxel size is %.3f," % voxel_size) - print(" we use a liberal distance threshold %.3f." % distance_threshold) - result = o3d.registration.registration_ransac_based_on_feature_matching( - source_down, target_down, source_fpfh, target_fpfh, distance_threshold, - o3d.registration.TransformationEstimationPointToPoint(False), 4, [ - o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9), - o3d.registration.CorrespondenceCheckerBasedOnDistance( - distance_threshold) - ], o3d.registration.RANSACConvergenceCriteria(4000000, 500)) - return result - - -def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size): - distance_threshold = voxel_size * 0.4 - print(":: Point-to-plane ICP registration is applied on original point") - print(" clouds to refine the alignment. This time we use a strict") - print(" distance threshold %.3f." 
% distance_threshold) - result = o3d.registration.registration_icp( - source, target, distance_threshold, result_ransac.transformation, - o3d.registration.TransformationEstimationPointToPlane()) - return result - - -if __name__ == "__main__": - voxel_size = 0.05 # means 5cm for the dataset - source, target, source_down, target_down, source_fpfh, target_fpfh = \ - prepare_dataset(voxel_size) - - result_ransac = execute_global_registration(source_down, target_down, - source_fpfh, target_fpfh, - voxel_size) - print(result_ransac) - draw_registration_result(source_down, target_down, - result_ransac.transformation) - - result_icp = refine_registration(source, target, source_fpfh, target_fpfh, - voxel_size) - print(result_icp) - draw_registration_result(source, target, result_icp.transformation) diff --git a/examples/Python/Advanced/mesh_deformation.ipynb b/examples/Python/Advanced/mesh_deformation.ipynb new file mode 100644 index 00000000000..fbc60dd2e3e --- /dev/null +++ b/examples/Python/Advanced/mesh_deformation.ipynb @@ -0,0 +1,114 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Mesh deformation\n", + "If we want to deform a triangle mesh according a small number of constraints, we can use mesh deformation algorithms. 
Open3D implements the as-rigid-as-possible method by [\\[SorkineAndAlexa2007\\]](../reference.html#SorkineAndAlexa2007) that optimizes the following energy functional\n", + "\n", + "\\begin{equation}\n", + "\\sum_i \\sum_{j \\in \\mathcal{N}(i)} w_{ij} || (\\mathbf{p}'_i - \\mathbf{p}'_j) - \\mathbf{R}_i (\\mathbf{p}_i - \\mathbf{p}_j)||^2 \\,,\n", + "\\end{equation}\n", + "\n", + "where $\\mathbf{R}_i$ are rotation matrices that we want to optimize for, and $\\mathbf{p}'_i$ and $\\mathbf{p}_i$ are the vertex positions after and before the optimization, respectively. $\\mathcal{N}(i)$ is the set of neighbours of vertex $i$. The weights $w_{ij}$ are cot weights.\n", + "\n", + "Open3D implements this method in `deform_as_rigid_as_possible`. The first argument to this method is a set of `constraint_ids` that refer to the vertices in the triangle mesh. The second argument `constraint_pos` defines at which position those vertices should be after the optimization. The optimization process is an iterative scheme. Hence, we also can define the number of iterations via `max_iter`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3dtut.get_armadillo_mesh()\n", + "\n", + "vertices = np.asarray(mesh.vertices)\n", + "static_ids = [idx for idx in np.where(vertices[:, 1] < -30)[0]]\n", + "static_pos = []\n", + "for id in static_ids:\n", + " static_pos.append(vertices[id])\n", + "handle_ids = [2490]\n", + "handle_pos = [vertices[2490] + np.array((-40, -40, -40))]\n", + "constraint_ids = o3d.utility.IntVector(static_ids + handle_ids)\n", + "constraint_pos = o3d.utility.Vector3dVector(static_pos + handle_pos)\n", + "\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " mesh_prime = mesh.deform_as_rigid_as_possible(\n", + " constraint_ids, constraint_pos, max_iter=50)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Original Mesh')\n", + "R = mesh.get_rotation_matrix_from_xyz((0,np.pi,0))\n", + "o3d.visualization.draw_geometries([mesh.rotate(R)])\n", + "print('Deformed Mesh')\n", + "mesh_prime.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_prime.rotate(R)])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Smoothed ARAP\n", + "Open3D also implements a smoothed version of the ARAP objective defined as \n", + "\n", + "\\begin{equation}\n", + "\\sum_i \\sum_{j \\in \\mathcal{N}(i)} w_{ij} || (\\mathbf{p}'_i - \\mathbf{p}'_j) - \\mathbf{R}_i (\\mathbf{p}_i - \\mathbf{p}_j)||^2 + \\alpha A ||\\mathbf{R}_i - \\mathbf{R}_j||^2\\,,\n", + "\\end{equation}\n", + "\n", + "that penalizes a deviation of neighbouring rotation matrices. $\\alpha$ is a trade-off parameter for the regularization term and $A$ is the surface area. \n", + "\n", + "The smoothed objective can be used in `deform_as_rigid_as_possible` by using the argument `energy` with the parameter `Smoothed`." 
+ ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/Python/Advanced/mesh_voxelization.py b/examples/Python/Advanced/mesh_voxelization.py deleted file mode 100644 index ed96b9d7491..00000000000 --- a/examples/Python/Advanced/mesh_voxelization.py +++ /dev/null @@ -1,119 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/mesh_voxelization.py - -import os -import sys -import urllib.request -import gzip -import tarfile -import shutil -import time -import open3d as o3d -import numpy as np -import numpy.matlib - -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def preprocess(model): - """Normalize model to fit in unit sphere (sphere with unit radius). - - Calculate center & scale of vertices, and transform vertices to have 0 mean and unit variance. 
- - Returns: - open3d.geometry.TriangleMesh: normalized mesh - """ - min_bound = model.get_min_bound() - max_bound = model.get_max_bound() - center = min_bound + (max_bound - min_bound) / 2.0 - scale = np.linalg.norm(max_bound - min_bound) / 2.0 - vertices = np.asarray(model.vertices) - vertices -= np.matlib.repmat(center, len(model.vertices), 1) - model.vertices = o3d.utility.Vector3dVector(vertices / scale) - - ## Paint uniform color for pleasing visualization - model.paint_uniform_color(np.array([1, 0.7, 0])) - return model - - -def mesh_generator(): - """Yields meshes - Yields: - open3d.geometry.TriangleMesh: yielded mesh - """ - yield meshes.armadillo() - yield meshes.bunny() - - -if __name__ == "__main__": - print("Start mesh_sampling_and_voxelization example") - for mesh in mesh_generator(): - print("Normalize mesh") - mesh = preprocess(mesh) - o3d.visualization.draw_geometries([mesh]) - print("") - - print("Sample uniform points") - start = time.time() - pcd = mesh.sample_points_uniformly(number_of_points=100000) - print("took %.2f milliseconds" % ((time.time() - start) * 1000.0)) - print("") - - print("visualize sampled point cloud") - o3d.visualization.draw_geometries([pcd]) - print("") - - print("Voxelize point cloud") - start = time.time() - voxel = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, - voxel_size=0.05) - print("took %.2f milliseconds" % ((time.time() - start) * 1000.0)) - print("") - - print("visualize voxel grid") - o3d.visualization.draw_geometries([voxel]) - print("") - - print("Save and load voxel grid") - print(voxel) - start = time.time() - o3d.io.write_voxel_grid("save.ply", voxel) - voxel_load = o3d.io.read_voxel_grid("save.ply") - print("took %.2f milliseconds" % ((time.time() - start) * 1000.0)) - print(voxel_load) - print(voxel_load.voxel_size) - print(voxel_load.origin) - print("") - - print("Element-wise check if points belong to voxel grid") - queries = np.asarray(pcd.points) - start = time.time() - output = 
voxel_load.check_if_included( - o3d.utility.Vector3dVector(queries)) - print("took %.2f milliseconds" % ((time.time() - start) * 1000.0)) - print(output[:10]) - print("") - - print( - "Element-wise check if points with additive Gaussian noise belong to voxel grid" - ) - queries_noise = queries + np.random.normal(0, 0.1, (len(pcd.points), 3)) - start = time.time() - output_noise = voxel_load.check_if_included( - o3d.utility.Vector3dVector(queries_noise)) - print(output_noise[:10]) - print("took %.2f milliseconds" % ((time.time() - start) * 1000.0)) - print("") - - print("Transform voxelgrid to octree") - start = time.time() - octree = voxel_load.to_octree(max_depth=8) - print(octree) - print("took %.2f milliseconds" % ((time.time() - start) * 1000.0)) - o3d.visualization.draw_geometries([octree]) - print("") diff --git a/examples/Python/Advanced/multiway_registration.ipynb b/examples/Python/Advanced/multiway_registration.ipynb new file mode 100644 index 00000000000..f2a79015273 --- /dev/null +++ b/examples/Python/Advanced/multiway_registration.ipynb @@ -0,0 +1,273 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multiway registration\n", + "Multiway registration is the process to align multiple pieces of geometry in a global space. Typically, the input is a set of geometries (e.g., point clouds or RGBD images) $\\{\\mathbf{P}_{i}\\}$. 
The output is a set of rigid transformations $\\{\\mathbf{T}_{i}\\}$, so that the transformed point clouds $\\{\\mathbf{T}_{i}\\mathbf{P}_{i}\\}$ are aligned in the global space.\n", + "\n", + "Open3D implements multiway registration via pose graph optimization. The backend implements the technique presented in [\\[Choi2015\\]](../reference.html#choi2015)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Input\n", + "The first part of the tutorial code reads three point clouds from files. The point clouds are downsampled and visualized together. They are misaligned." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def load_point_clouds(voxel_size=0.0):\n", + " pcds = []\n", + " for i in range(3):\n", + " pcd = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_%d.pcd\" % i)\n", + " pcd_down = pcd.voxel_down_sample(voxel_size=voxel_size)\n", + " pcds.append(pcd_down)\n", + " return pcds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "voxel_size = 0.02\n", + "pcds_down = load_point_clouds(voxel_size)\n", + "o3d.visualization.draw_geometries(pcds_down, zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pose graph\n", + "A pose graph has two key elements: nodes and edges. A node is a piece of geometry $\\mathbf{P}_{i}$ associated with a pose matrix $\\mathbf{T}_{i}$ which transforms $\\mathbf{P}_{i}$ into the global space. The set $\\{\\mathbf{T}_{i}\\}$ are the unknown variables to be optimized. `PoseGraph.nodes` is a list of `PoseGraphNode`. We set the global space to be the space of $\\mathbf{P}_{0}$. Thus $\\mathbf{T}_{0}$ is the identity matrix. 
The other pose matrices are initialized by accumulating transformation between neighboring nodes. The neighboring nodes usually have large overlap and can be registered with [Point-to-plane ICP](../Basic/icp_registration.ipynb#point-to-plane-ICP).\n", + "\n", + "A pose graph edge connects two nodes (pieces of geometry) that overlap. Each edge contains a transformation matrix $\\mathbf{T}_{i,j}$ that aligns the source geometry $\\mathbf{P}_{i}$ to the target geometry $\\mathbf{P}_{j}$. This tutorial uses [Point-to-plane ICP](../Basic/icp_registration.ipynb#point-to-plane-ICP) to estimate the transformation. In more complicated cases, this pairwise registration problem should be solved via [Global registration](global_registration.ipynb).\n", + "\n", + "[\\[Choi2015\\]](../reference.html#choi2015) has observed that pairwise registration is error-prone. False pairwise alignments can outnumber correctly aligned pairs. Thus, they partition pose graph edges into two classes. **Odometry edges** connect temporally close, neighboring nodes. A local registration algorithm such as ICP can reliably align them. **Loop closure edges** connect any non-neighboring nodes. The alignment is found by global registration and is less reliable. In Open3D, these two classes of edges are distinguished by the `uncertain` parameter in the initializer of `PoseGraphEdge`.\n", + "\n", + "In addition to the transformation matrix $\\mathbf{T}_{i}$, the user can set an information matrix $\\mathbf{\\Lambda}_{i}$ for each edge. If $\\mathbf{\\Lambda}_{i}$ is set using function `get_information_matrix_from_point_clouds`, the loss on this pose graph edge approximates the RMSE of the corresponding sets between the two nodes, with a line process weight. 
Refer to Eq (3) to (9) in [\\[Choi2015\\]](../reference.html#choi2015) and [the Redwood registration benchmark](http://redwood-data.org/indoor/registration.html) for details.\n", + "\n", + "The script creates a pose graph with three nodes and three edges. Among the edges, two of them are odometry edges (`uncertain = False`) and one is a loop closure edge (`uncertain = True`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def pairwise_registration(source, target):\n", + " print(\"Apply point-to-plane ICP\")\n", + " icp_coarse = o3d.registration.registration_icp(\n", + " source, target, max_correspondence_distance_coarse, np.identity(4),\n", + " o3d.registration.TransformationEstimationPointToPlane())\n", + " icp_fine = o3d.registration.registration_icp(\n", + " source, target, max_correspondence_distance_fine,\n", + " icp_coarse.transformation,\n", + " o3d.registration.TransformationEstimationPointToPlane())\n", + " transformation_icp = icp_fine.transformation\n", + " information_icp = o3d.registration.get_information_matrix_from_point_clouds(\n", + " source, target, max_correspondence_distance_fine,\n", + " icp_fine.transformation)\n", + " return transformation_icp, information_icp\n", + "\n", + "\n", + "def full_registration(pcds, max_correspondence_distance_coarse,\n", + " max_correspondence_distance_fine):\n", + " pose_graph = o3d.registration.PoseGraph()\n", + " odometry = np.identity(4)\n", + " pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry))\n", + " n_pcds = len(pcds)\n", + " for source_id in range(n_pcds):\n", + " for target_id in range(source_id + 1, n_pcds):\n", + " transformation_icp, information_icp = pairwise_registration(\n", + " pcds[source_id], pcds[target_id])\n", + " print(\"Build o3d.registration.PoseGraph\")\n", + " if target_id == source_id + 1: # odometry case\n", + " odometry = np.dot(transformation_icp, odometry)\n", + " pose_graph.nodes.append(\n", + " 
o3d.registration.PoseGraphNode(np.linalg.inv(odometry)))\n", + " pose_graph.edges.append(\n", + " o3d.registration.PoseGraphEdge(source_id,\n", + " target_id,\n", + " transformation_icp,\n", + " information_icp,\n", + " uncertain=False))\n", + " else: # loop closure case\n", + " pose_graph.edges.append(\n", + " o3d.registration.PoseGraphEdge(source_id,\n", + " target_id,\n", + " transformation_icp,\n", + " information_icp,\n", + " uncertain=True))\n", + " return pose_graph" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Full registration ...\")\n", + "max_correspondence_distance_coarse = voxel_size * 15\n", + "max_correspondence_distance_fine = voxel_size * 1.5\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " pose_graph = full_registration(pcds_down,\n", + " max_correspondence_distance_coarse,\n", + " max_correspondence_distance_fine)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Open3D uses function `global_optimization` to perform pose graph optimization. Two types of optimization methods can be chosen: `GlobalOptimizationGaussNewton` or `GlobalOptimizationLevenbergMarquardt`. The latter is recommended since it has better convergence property. Class `GlobalOptimizationConvergenceCriteria` can be used to set the maximum number of iterations and various optimization parameters.\n", + "\n", + "Class `GlobalOptimizationOption` defines a couple of options. `max_correspondence_distance` decides the correspondence threshold. `edge_prune_threshold` is a threshold for pruning outlier edges. `reference_node` is the node id that is considered to be the global space." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Optimizing PoseGraph ...\")\n", + "option = o3d.registration.GlobalOptimizationOption(\n", + " max_correspondence_distance=max_correspondence_distance_fine,\n", + " edge_prune_threshold=0.25,\n", + " reference_node=0)\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " o3d.registration.global_optimization(\n", + " pose_graph, o3d.registration.GlobalOptimizationLevenbergMarquardt(),\n", + " o3d.registration.GlobalOptimizationConvergenceCriteria(), option)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The global optimization performs twice on the pose graph. The first pass optimizes poses for the original pose graph taking all edges into account and does its best to distinguish false alignments among uncertain edges. These false alignments have small line process weights, and they are pruned after the first pass. The second pass runs without them and produces a tight global alignment. In this example, all the edges are considered as true alignments, hence the second pass terminates immediately." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize optimization\n", + "The transformed point clouds are listed and visualized using `draw_geometries`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Transform points and display\")\n", + "for point_id in range(len(pcds_down)):\n", + " print(pose_graph.nodes[point_id].pose)\n", + " pcds_down[point_id].transform(pose_graph.nodes[point_id].pose)\n", + "o3d.visualization.draw_geometries(pcds_down, zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Make a combined point cloud\n", + "`PointCloud` has convenient operator `+` that can merge two point clouds into single one. After merging, the points are uniformly resampled using `voxel_down_sample`. This is recommended post-processing after merging point cloud since this can relieve duplicating or over-densified points." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcds = load_point_clouds(voxel_size)\n", + "pcd_combined = o3d.geometry.PointCloud()\n", + "for point_id in range(len(pcds)):\n", + " pcds[point_id].transform(pose_graph.nodes[point_id].pose)\n", + " pcd_combined += pcds[point_id]\n", + "pcd_combined_down = pcd_combined.voxel_down_sample(voxel_size=voxel_size)\n", + "o3d.io.write_point_cloud(\"multiway_registration.pcd\", pcd_combined_down)\n", + "o3d.visualization.draw_geometries([pcd_combined_down], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:**\n", + "\n", + "Although this tutorial demonstrates multiway registration for point clouds. The same procedure can be applied to RGBD images. See [Make fragments](../ReconstructionSystem/make_fragments.rst) for an example.\n", + "\n", + "
" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Advanced/multiway_registration.py b/examples/Python/Advanced/multiway_registration.py deleted file mode 100644 index fcfc6689b9d..00000000000 --- a/examples/Python/Advanced/multiway_registration.py +++ /dev/null @@ -1,105 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/multiway_registration.py - -import open3d as o3d -import numpy as np - -voxel_size = 0.02 -max_correspondence_distance_coarse = voxel_size * 15 -max_correspondence_distance_fine = voxel_size * 1.5 - - -def load_point_clouds(voxel_size=0.0): - pcds = [] - for i in range(3): - pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_%d.pcd" % i) - pcd_down = pcd.voxel_down_sample(voxel_size=voxel_size) - pcds.append(pcd_down) - return pcds - - -def pairwise_registration(source, target): - print("Apply point-to-plane ICP") - icp_coarse = o3d.registration.registration_icp( - source, target, max_correspondence_distance_coarse, np.identity(4), - o3d.registration.TransformationEstimationPointToPlane()) - icp_fine = o3d.registration.registration_icp( - source, target, max_correspondence_distance_fine, - icp_coarse.transformation, - o3d.registration.TransformationEstimationPointToPlane()) - transformation_icp = icp_fine.transformation - information_icp = o3d.registration.get_information_matrix_from_point_clouds( - source, target, max_correspondence_distance_fine, - icp_fine.transformation) - return transformation_icp, 
information_icp - - -def full_registration(pcds, max_correspondence_distance_coarse, - max_correspondence_distance_fine): - pose_graph = o3d.registration.PoseGraph() - odometry = np.identity(4) - pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry)) - n_pcds = len(pcds) - for source_id in range(n_pcds): - for target_id in range(source_id + 1, n_pcds): - transformation_icp, information_icp = pairwise_registration( - pcds[source_id], pcds[target_id]) - print("Build o3d.registration.PoseGraph") - if target_id == source_id + 1: # odometry case - odometry = np.dot(transformation_icp, odometry) - pose_graph.nodes.append( - o3d.registration.PoseGraphNode(np.linalg.inv(odometry))) - pose_graph.edges.append( - o3d.registration.PoseGraphEdge(source_id, - target_id, - transformation_icp, - information_icp, - uncertain=False)) - else: # loop closure case - pose_graph.edges.append( - o3d.registration.PoseGraphEdge(source_id, - target_id, - transformation_icp, - information_icp, - uncertain=True)) - return pose_graph - - -if __name__ == "__main__": - - o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) - pcds_down = load_point_clouds(voxel_size) - o3d.visualization.draw_geometries(pcds_down) - - print("Full registration ...") - pose_graph = full_registration(pcds_down, - max_correspondence_distance_coarse, - max_correspondence_distance_fine) - - print("Optimizing PoseGraph ...") - option = o3d.registration.GlobalOptimizationOption( - max_correspondence_distance=max_correspondence_distance_fine, - edge_prune_threshold=0.25, - reference_node=0) - o3d.registration.global_optimization( - pose_graph, o3d.registration.GlobalOptimizationLevenbergMarquardt(), - o3d.registration.GlobalOptimizationConvergenceCriteria(), option) - - print("Transform points and display") - for point_id in range(len(pcds_down)): - print(pose_graph.nodes[point_id].pose) - pcds_down[point_id].transform(pose_graph.nodes[point_id].pose) - o3d.visualization.draw_geometries(pcds_down) - 
- print("Make a combined point cloud") - pcds = load_point_clouds(voxel_size) - pcd_combined = o3d.geometry.PointCloud() - for point_id in range(len(pcds)): - pcds[point_id].transform(pose_graph.nodes[point_id].pose) - pcd_combined += pcds[point_id] - pcd_combined_down = pcd_combined.voxel_down_sample(voxel_size=voxel_size) - o3d.io.write_point_cloud("multiway_registration.pcd", pcd_combined_down) - o3d.visualization.draw_geometries([pcd_combined_down]) diff --git a/examples/Python/Advanced/pointcloud_outlier_removal.ipynb b/examples/Python/Advanced/pointcloud_outlier_removal.ipynb new file mode 100644 index 00000000000..b0bf5271570 --- /dev/null +++ b/examples/Python/Advanced/pointcloud_outlier_removal.ipynb @@ -0,0 +1,176 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Point cloud outlier removal\n", + "When collecting data from scanning devices, it happens that the point cloud contains noise and artifacts that one would like to remove. This tutorial addresses the outlier removal features of Open3D." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prepare input data\n", + "A point cloud is loaded and downsampled using `voxel_down_sample`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Load a ply point cloud, print it, and render it\")\n", + "pcd = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_2.pcd\")\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])\n", + "\n", + "print(\"Downsample the point cloud with a voxel of 0.02\")\n", + "voxel_down_pcd = pcd.voxel_down_sample(voxel_size=0.02)\n", + "o3d.visualization.draw_geometries([voxel_down_pcd], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For comparison, `uniform_down_sample` can downsample a point cloud by collecting every n-th point." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Every 5th points are selected\")\n", + "uni_down_pcd = pcd.uniform_down_sample(every_k_points=5)\n", + "o3d.visualization.draw_geometries([uni_down_pcd], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Select down sample\n", + "The helper function uses `select_by_index`, which takes a binary mask to output only the selected points. The selected points and the non-selected points are visualized." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def display_inlier_outlier(cloud, ind):\n", + " inlier_cloud = cloud.select_by_index(ind)\n", + " outlier_cloud = cloud.select_by_index(ind, invert=True)\n", + "\n", + " print(\"Showing outliers (red) and inliers (gray): \")\n", + " outlier_cloud.paint_uniform_color([1, 0, 0])\n", + " inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])\n", + " o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud], \n", + " zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Statistical outlier removal\n", + "`statistical_outlier_removal` removes points that are further away from their neighbors compared to the average for the point cloud. It takes two input parameters:\n", + "\n", + "- `nb_neighbors` allows to specify how many neighbors are taken into account in order to calculate the average distance for a given point.\n", + "- `std_ratio` allows to set the threshold level based on the standard deviation of the average distances across the point cloud. The lower this number the more aggressive the filter will be." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Statistical oulier removal\")\n", + "cl, ind = voxel_down_pcd.remove_statistical_outlier(nb_neighbors=20,\n", + " std_ratio=2.0)\n", + "display_inlier_outlier(voxel_down_pcd, ind)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Radius outlier removal\n", + "`radius_outlier_removal` removes points that have few neighbors in a given sphere around them. 
Two parameters can be used to tune the filter to your data:\n", + "\n", + "- `nb_points` lets you pick the minimum amount of points that the sphere should contain\n", + "- `radius` defines the radius of the sphere that will be used for counting the neighbors." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Radius oulier removal\")\n", + "cl, ind = voxel_down_pcd.remove_radius_outlier(nb_points=16, radius=0.05)\n", + "display_inlier_outlier(voxel_down_pcd, ind)" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Advanced/pointcloud_outlier_removal.py b/examples/Python/Advanced/pointcloud_outlier_removal.py deleted file mode 100644 index 81c5be385e9..00000000000 --- a/examples/Python/Advanced/pointcloud_outlier_removal.py +++ /dev/null @@ -1,41 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/pointcloud_outlier_removal.py - -import open3d as o3d - - -def display_inlier_outlier(cloud, ind): - inlier_cloud = cloud.select_by_index(ind) - outlier_cloud = cloud.select_by_index(ind, invert=True) - - print("Showing outliers (red) and inliers (gray): ") - outlier_cloud.paint_uniform_color([1, 0, 0]) - inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8]) - o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud]) - - -if __name__ == "__main__": - - print("Load a ply point cloud, print it, and render it") - pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_2.pcd") 
- o3d.visualization.draw_geometries([pcd]) - - print("Downsample the point cloud with a voxel of 0.02") - voxel_down_pcd = pcd.voxel_down_sample(voxel_size=0.02) - o3d.visualization.draw_geometries([voxel_down_pcd]) - - print("Every 5th points are selected") - uni_down_pcd = pcd.uniform_down_sample(every_k_points=5) - o3d.visualization.draw_geometries([uni_down_pcd]) - - print("Statistical oulier removal") - cl, ind = voxel_down_pcd.remove_statistical_outlier(nb_neighbors=20, - std_ratio=2.0) - display_inlier_outlier(voxel_down_pcd, ind) - - print("Radius oulier removal") - cl, ind = voxel_down_pcd.remove_radius_outlier(nb_points=16, radius=0.05) - display_inlier_outlier(voxel_down_pcd, ind) diff --git a/examples/Python/Advanced/rgbd_integration.ipynb b/examples/Python/Advanced/rgbd_integration.ipynb new file mode 100644 index 00000000000..15e7a2c2762 --- /dev/null +++ b/examples/Python/Advanced/rgbd_integration.ipynb @@ -0,0 +1,185 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RGBD integration\n", + "Open3D implements a scalable RGBD image integration algorithm. The algorithm is based on the technique presented in [\\[Curless1996\\]](../reference.html#curless1996) and [\\[Newcombe2011\\]](../reference.html#newcombe2011). In order to support large scenes, we use a hierarchical hashing structure introduced in [Integrater in ElasticReconstruction](https://github.com/qianyizh/ElasticReconstruction/tree/master/Integrate)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Read trajectory from .log file\n", + "This tutorial uses the function `read_trajectory` to read a camera trajectory from a [.log file](http://redwood-data.org/indoor/fileformat.html). A sample `.log` file is as follows.\n", + "\n", + "```\n", + "# examples/TestData/RGBD/odometry.log\n", + "0 0 1\n", + "1 0 0 2\n", + "0 1 0 2\n", + "0 0 1 -0.3\n", + "0 0 0 1\n", + "1 1 2\n", + "0.999988 3.08668e-005 0.0049181 1.99962\n", + "-8.84184e-005 0.999932 0.0117022 1.97704\n", + "-0.0049174 -0.0117024 0.999919 -0.300486\n", + "0 0 0 1\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class CameraPose:\n", + " def __init__(self, meta, mat):\n", + " self.metadata = meta\n", + " self.pose = mat\n", + "\n", + " def __str__(self):\n", + " return 'Metadata : ' + ' '.join(map(str, self.metadata)) + '\\n' + \\\n", + " \"Pose : \" + \"\\n\" + np.array_str(self.pose)\n", + " \n", + "def read_trajectory(filename):\n", + " traj = []\n", + " with open(filename, 'r') as f:\n", + " metastr = f.readline()\n", + " while metastr:\n", + " metadata = list(map(int, metastr.split()))\n", + " mat = np.zeros(shape=(4, 4))\n", + " for i in range(4):\n", + " matstr = f.readline()\n", + " mat[i, :] = np.fromstring(matstr, dtype=float, sep=' \\t')\n", + " traj.append(CameraPose(metadata, mat))\n", + " metastr = f.readline()\n", + " return traj" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "camera_poses = read_trajectory(\"../../TestData/RGBD/odometry.log\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TSDF volume integration\n", + "Open3D provides two types of TSDF volumes: `UniformTSDFVolume` and `ScalableTSDFVolume`. 
The latter is recommended since it uses a hierarchical structure and thus supports larger scenes.\n", + "\n", + "`ScalableTSDFVolume` has several parameters. `voxel_length = 4.0 / 512.0` means a single voxel size for TSDF volume is $\\frac{4.0m}{512.0} = 7.8125mm$. Lowering this value makes a high-resolution TSDF volume, but the integration result can be susceptible to depth noise. `sdf_trunc = 0.04` specifies truncation value for signed distance function (SDF). When `color_type = TSDFVolumeColorType.RGB8`, 8 bit RGB color is also integrated as part of the TSDF volume. Float type intensity can be integrated with `color_type = TSDFVolumeColorType.Gray32` and `convert_rgb_to_intensity = True`. The color integration is inspired by [PCL](http://pointclouds.org/)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume = o3d.integration.ScalableTSDFVolume(\n", + " voxel_length=4.0 / 512.0,\n", + " sdf_trunc=0.04,\n", + " color_type=o3d.integration.TSDFVolumeColorType.RGB8)\n", + "\n", + "for i in range(len(camera_poses)):\n", + " print(\"Integrate {:d}-th image into the volume.\".format(i))\n", + " color = o3d.io.read_image(\n", + " \"../../TestData/RGBD/color/{:05d}.jpg\".format(i))\n", + " depth = o3d.io.read_image(\n", + " \"../../TestData/RGBD/depth/{:05d}.png\".format(i))\n", + " rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", + " color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False)\n", + " volume.integrate(\n", + " rgbd,\n", + " o3d.camera.PinholeCameraIntrinsic(\n", + " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault),\n", + " np.linalg.inv(camera_poses[i].pose))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extract a mesh\n", + "Mesh extraction uses the marching cubes algorithm [\\[LorensenAndCline1987\\]](../reference.html#lorensenandcline1987)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Extract a triangle mesh from the volume and visualize it.\")\n", + "mesh = volume.extract_triangle_mesh()\n", + "mesh.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh], front=[0.5297, -0.1873, -0.8272], \n", + " lookat=[2.0712, 2.0312, 1.7251],\n", + " up=[-0.0558, -0.9809, 0.1864], zoom=0.47)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:**\n", + "\n", + "TSDF volume works like weighted average filter in 3D space. If more frames are integrated, the volume produces smoother and nicer mesh. Please check [Make fragments](../ReconstructionSystem/make_fragments.rst) for more examples.\n", + "\n", + "
" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Advanced/rgbd_integration.py b/examples/Python/Advanced/rgbd_integration.py deleted file mode 100644 index 07cceedd226..00000000000 --- a/examples/Python/Advanced/rgbd_integration.py +++ /dev/null @@ -1,35 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/rgbd_integration.py - -import open3d as o3d -from trajectory_io import * -import numpy as np - -if __name__ == "__main__": - camera_poses = read_trajectory("../../TestData/RGBD/odometry.log") - volume = o3d.integration.ScalableTSDFVolume( - voxel_length=4.0 / 512.0, - sdf_trunc=0.04, - color_type=o3d.integration.TSDFVolumeColorType.RGB8) - - for i in range(len(camera_poses)): - print("Integrate {:d}-th image into the volume.".format(i)) - color = o3d.io.read_image( - "../../TestData/RGBD/color/{:05d}.jpg".format(i)) - depth = o3d.io.read_image( - "../../TestData/RGBD/depth/{:05d}.png".format(i)) - rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth( - color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False) - volume.integrate( - rgbd, - o3d.camera.PinholeCameraIntrinsic( - o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault), - np.linalg.inv(camera_poses[i].pose)) - - print("Extract a triangle mesh from the volume and visualize it.") - mesh = volume.extract_triangle_mesh() - mesh.compute_vertex_normals() - o3d.visualization.draw_geometries([mesh]) diff --git a/examples/Python/Advanced/surface_reconstruction.ipynb 
b/examples/Python/Advanced/surface_reconstruction.ipynb new file mode 100644 index 00000000000..d92f4e5393f --- /dev/null +++ b/examples/Python/Advanced/surface_reconstruction.ipynb @@ -0,0 +1,241 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import sys\n", + "\n", + "# only needed for tutorial, monkey patches visualization\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Surface Reconstruction\n", + "\n", + "In many scenarios we want to generate a dense 3D geometry, i.e., a triangle mesh. However, from a multi-view stereo method, or a depth sensor we only obtain an unstructured point cloud. To get a triangle mesh from this unstructured input we need a surface reconstruction method. In the literature there exist a couple of methods and Open3D implements currently the following methods:\n", + "\n", + "- Alpha shapes [\\[Edelsbrunner1983\\]](../reference.html#Edelsbrunner1983)\n", + "- Ball pivoting [\\[Bernardini1999\\]](../reference.html#Bernardini1999)\n", + "- Poisson [\\[Kazhdan2006\\]](../reference.html#Kazhdan2006)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Alpha shapes\n", + "The alpha shape [\\[Edelsbrunner1983\\]](../reference.html#Edelsbrunner1983) is a generalization of a convex hull. As described [here](https://graphics.stanford.edu/courses/cs268-11-spring/handouts/AlphaShapes/as_fisher.pdf) one can intuitively\n", + "think of an alpha shape as the following: Imagine a huge mass of ice-cream containing the points $S$ as hard chocolate pieces. 
Using one of these sphere-formed ice-cream spoons we carve out all parts of the icecream block we can reach without bumping into chocolate pieces, thereby even carving out holes in the inside (e.g., parts not reachable by simply moving the\n", + "spoon from the outside). We will eventually end up with a (not necessarily convex) object bounded by caps, arcs and points. If we now straighten all round faces to triangles and line segments, we have an intuitive description of what is called the alpha shape of $S$.\n", + "\n", + "Open3D implements the method `create_from_point_cloud_alpha_shape` that involves the tradeoff parameter `alpha`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3dtut.get_bunny_mesh()\n", + "pcd = mesh.sample_points_poisson_disk(750)\n", + "o3d.visualization.draw_geometries([pcd])\n", + "alpha = 0.03\n", + "print(f\"alpha={alpha:.3f}\")\n", + "mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(\n", + " pcd, alpha)\n", + "mesh.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The implementation is based on the convex hull of the point cloud. If we want to compute multiple alpha shapes from a given point cloud, then we can save some computation by only computing the convex hull once and pass it to `create_from_point_cloud_alpha_shape`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tetra_mesh, pt_map = o3d.geometry.TetraMesh.create_from_point_cloud(pcd)\n", + "for alpha in np.logspace(np.log10(0.5), np.log10(0.01), num=4):\n", + " print(f\"alpha={alpha:.3f}\")\n", + " mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(\n", + " pcd, alpha, tetra_mesh, pt_map)\n", + " mesh.compute_vertex_normals()\n", + " o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Ball pivoting\n", + "A to alpha shapes related method for surface reconstruction is the ball pivoting algorithm (BPA) [\\[Bernardini1999\\]](../reference.html#Bernardini1999). Intuitively, think of a 3D ball with a given radius that we drop on the point cloud. If it hits any 3 points (and it does not fall through those 3 points) it creates a triangles. Then, the algorithm starts pivoting from the edges of the existing triangles and every time it hits 3 points where the ball does not fall through we create another triangle.\n", + "\n", + "Open3D implements this method in `create_from_point_cloud_ball_pivoting`. The method accepts a list of `radii` as parameter that corresponds to the radii of the individual balls that are pivoted on the point cloud.\n", + "\n", + "\n", + "
\n", + " \n", + "**Note:** \n", + "\n", + "This algorithm assumes that the `PointCloud` has `normals`.\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gt_mesh = o3dtut.get_bunny_mesh()\n", + "gt_mesh.compute_vertex_normals()\n", + "pcd = gt_mesh.sample_points_poisson_disk(3000)\n", + "o3d.visualization.draw_geometries([pcd], point_show_normal=True)\n", + "\n", + "radii = [0.005, 0.01, 0.02, 0.04]\n", + "rec_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(\n", + " pcd, o3d.utility.DoubleVector(radii))\n", + "o3d.visualization.draw_geometries([pcd, rec_mesh])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Poisson surface reconstruction\n", + "The surface reconstruction methods produce non-smooth results as the points of the `PointCloud` are also the `vertices` of the triangle mesh without any modification. The Poisson surface reconstruction method [\\[Kazhdan2006\\]](../reference.html#Kazhdan2006) solves a regularized optimization problem to obtain a smooth surface. \n", + "\n", + "Open3D implements the method `create_from_point_cloud_poisson` that is basically a wrapper of the code of [Kazhdan](https://github.com/mkazhdan/PoissonRecon). An important parameter of the function is `depth` that defines the depth of the octree used for the surface reconstruction and hence, implies the resolution of the resulting triangle mesh. A higher `depth` values means a mesh with more details.\n", + "\n", + "
\n", + " \n", + "**Note:** \n", + "\n", + "This algorithm assumes that the `PointCloud` has `normals`.\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3dtut.get_eagle_pcd()\n", + "print(pcd)\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.664,\n", + " front=[-0.4761, -0.4698, -0.7434],\n", + " lookat=[1.8900, 3.2596, 0.9284],\n", + " up=[0.2304, -0.8825, 0.4101])\n", + "\n", + "print('run Poisson surface reconstruction')\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)\n", + "print(mesh)\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.664,\n", + " front=[-0.4761, -0.4698, -0.7434],\n", + " lookat=[1.8900, 3.2596, 0.9284],\n", + " up=[0.2304, -0.8825, 0.4101])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Poisson surface reconstruction will also create triangles in areas of low point density, and even extrapolates into some areas (see bottom of the eagle output above). The `create_from_point_cloud_poisson` function has a second `densities` return value that indicates for each vertex the density. A low density value means that the vertex is only supported by a low number of points from the input point cloud.\n", + "\n", + "In the code below we visualize the density in 3D using pseudo color. Violet indicates low density and yellow indicates a high density." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('visualize densities')\n", + "densities = np.asarray(densities)\n", + "density_colors = plt.get_cmap('plasma')(\n", + " (densities - densities.min()) / (densities.max() - densities.min()))\n", + "density_colors = density_colors[:, :3]\n", + "density_mesh = o3d.geometry.TriangleMesh()\n", + "density_mesh.vertices = mesh.vertices\n", + "density_mesh.triangles = mesh.triangles\n", + "density_mesh.triangle_normals = mesh.triangle_normals\n", + "density_mesh.vertex_colors = o3d.utility.Vector3dVector(density_colors)\n", + "o3d.visualization.draw_geometries([density_mesh], zoom=0.664,\n", + " front=[-0.4761, -0.4698, -0.7434],\n", + " lookat=[1.8900, 3.2596, 0.9284],\n", + " up=[0.2304, -0.8825, 0.4101])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can further use the density values to remove vertices and triangles that have a low support. In the code below we remove all vertices (and connected triangles) that have a lower density value than the $0.01$ quantile of all density values." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('remove low density vertices')\n", + "vertices_to_remove = densities < np.quantile(densities, 0.01)\n", + "mesh.remove_vertices_by_mask(vertices_to_remove)\n", + "print(mesh)\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.664,\n", + " front=[-0.4761, -0.4698, -0.7434],\n", + " lookat=[1.8900, 3.2596, 0.9284],\n", + " up=[0.2304, -0.8825, 0.4101])" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/Python/Advanced/surface_reconstruction_alpha_shape.py b/examples/Python/Advanced/surface_reconstruction_alpha_shape.py deleted file mode 100644 index 3a5a3e5930d..00000000000 --- a/examples/Python/Advanced/surface_reconstruction_alpha_shape.py +++ /dev/null @@ -1,52 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/surface_reconstruction_alpha_shape.py - -import open3d as o3d -import numpy as np -import os - -import sys - -sys.path.append( - os.path.join(os.path.dirname(os.path.realpath(__file__)), "../Misc")) -import meshes - - -def draw_geometries_with_back_face(geometries): - visualizer = o3d.visualization.Visualizer() - visualizer.create_window() - render_option = visualizer.get_render_option() - render_option.mesh_show_back_face = True - for geometry in geometries: - visualizer.add_geometry(geometry) - visualizer.run() - visualizer.destroy_window() - - -if __name__ == "__main__": - 
o3d.utility.set_verbosity_level(o3d.utility.Debug) - - mesh = meshes.bunny() - pcd = mesh.sample_points_poisson_disk(750) - o3d.visualization.draw_geometries([pcd]) - for alpha in np.logspace(np.log10(0.5), np.log10(0.01), num=4): - print("alpha={}".format(alpha)) - mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape( - pcd, alpha) - mesh.compute_vertex_normals() - draw_geometries_with_back_face([mesh]) - - pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") - o3d.visualization.draw_geometries([pcd]) - print("compute tetra mesh only once") - tetra_mesh, pt_map = o3d.geometry.TetraMesh.create_from_point_cloud(pcd) - print("done with tetra mesh") - for alpha in np.logspace(np.log10(0.5), np.log10(0.01), num=4): - print("alpha={}".format(alpha)) - mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape( - pcd, alpha, tetra_mesh, pt_map) - mesh.compute_vertex_normals() - draw_geometries_with_back_face([mesh]) diff --git a/examples/Python/Advanced/surface_reconstruction_ball_pivoting.py b/examples/Python/Advanced/surface_reconstruction_ball_pivoting.py deleted file mode 100644 index f268566045c..00000000000 --- a/examples/Python/Advanced/surface_reconstruction_ball_pivoting.py +++ /dev/null @@ -1,61 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/surface_reconstruction_ball_pivoting.py - -import open3d as o3d -import numpy as np -import os - -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def problem_generator(): - o3d.utility.set_verbosity_level(o3d.utility.Debug) - - points = [] - normals = [] - for _ in range(4): - for _ in range(4): - pt = (np.random.uniform(-2, 2), np.random.uniform(-2, 2), 0) - points.append(pt) - normals.append((0, 0, 1)) - points = np.array(points, dtype=np.float64) - normals = np.array(normals, dtype=np.float64) - pcd = 
o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(points) - pcd.normals = o3d.utility.Vector3dVector(normals) - radii = [1, 2] - yield pcd, radii - - o3d.utility.set_verbosity_level(o3d.utility.Info) - - gt_mesh = o3d.geometry.TriangleMesh.create_sphere() - gt_mesh.compute_vertex_normals() - pcd = gt_mesh.sample_points_poisson_disk(100) - radii = [0.5, 1, 2] - yield pcd, radii - - gt_mesh = meshes.bunny() - gt_mesh.compute_vertex_normals() - pcd = gt_mesh.sample_points_poisson_disk(2000) - radii = [0.005, 0.01, 0.02, 0.04] - yield pcd, radii - - gt_mesh = meshes.armadillo() - gt_mesh.compute_vertex_normals() - pcd = gt_mesh.sample_points_poisson_disk(2000) - radii = [5, 10] - yield pcd, radii - - -if __name__ == "__main__": - for pcd, radii in problem_generator(): - o3d.visualization.draw_geometries([pcd]) - rec_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting( - pcd, o3d.utility.DoubleVector(radii)) - o3d.visualization.draw_geometries([pcd, rec_mesh]) diff --git a/examples/Python/Advanced/surface_reconstruction_poisson.py b/examples/Python/Advanced/surface_reconstruction_poisson.py deleted file mode 100644 index 0042a4c1c3c..00000000000 --- a/examples/Python/Advanced/surface_reconstruction_poisson.py +++ /dev/null @@ -1,47 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/surface_reconstruction_poisson.py - -import open3d as o3d -import numpy as np -import matplotlib.pyplot as plt -import os - -import sys - -sys.path.append( - os.path.join(os.path.dirname(os.path.realpath(__file__)), "../Misc")) -import meshes - -if __name__ == "__main__": - o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) - - pcd = meshes.eagle() - print(pcd) - o3d.visualization.draw_geometries([pcd]) - - print('run Poisson surface reconstruction') - mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson( - pcd, depth=8) - 
print(mesh) - o3d.visualization.draw_geometries([mesh]) - - print('visualize densities') - densities = np.asarray(densities) - density_colors = plt.get_cmap('plasma')( - (densities - densities.min()) / (densities.max() - densities.min())) - density_colors = density_colors[:, :3] - density_mesh = o3d.geometry.TriangleMesh() - density_mesh.vertices = mesh.vertices - density_mesh.triangles = mesh.triangles - density_mesh.triangle_normals = mesh.triangle_normals - density_mesh.vertex_colors = o3d.utility.Vector3dVector(density_colors) - o3d.visualization.draw_geometries([density_mesh]) - - print('remove low density vertices') - vertices_to_remove = densities < np.quantile(densities, 0.1) - mesh.remove_vertices_by_mask(vertices_to_remove) - print(mesh) - o3d.visualization.draw_geometries([mesh]) diff --git a/examples/Python/Advanced/voxel_carving.py b/examples/Python/Advanced/voxel_carving.py deleted file mode 100644 index 736ea7c6ab8..00000000000 --- a/examples/Python/Advanced/voxel_carving.py +++ /dev/null @@ -1,175 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Advanced/voxel_carving.py - -import open3d as o3d -import numpy as np -import os - -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def get_extrinsic(xyz): - rvec = xyz_spherical(xyz) - r = get_rotation_matrix(rvec[1], rvec[2]) - t = np.asarray([0, 0, 2]).transpose() - trans = np.eye(4) - trans[:3, :3] = r - trans[:3, 3] = t - return trans - - -def xyz_spherical(xyz): - x = xyz[0] - y = xyz[1] - z = xyz[2] - r = np.sqrt(x * x + y * y + z * z) - r_x = np.arccos(y / r) - r_y = np.arctan2(z, x) - return [r, r_x, r_y] - - -def get_rotation_matrix(r_x, r_y): - rot_x = np.asarray([[1, 0, 0], [0, np.cos(r_x), -np.sin(r_x)], - [0, np.sin(r_x), np.cos(r_x)]]) - rot_y = np.asarray([[np.cos(r_y), 0, np.sin(r_y)], [0, 1, 0], - [-np.sin(r_y), 0, 
np.cos(r_y)]]) - return rot_y.dot(rot_x) - - -def preprocess(model): - min_bound = model.get_min_bound() - max_bound = model.get_max_bound() - center = min_bound + (max_bound - min_bound) / 2.0 - scale = np.linalg.norm(max_bound - min_bound) / 2.0 - vertices = np.asarray(model.vertices) - vertices -= center - model.vertices = o3d.utility.Vector3dVector(vertices / scale) - return model - - -def voxel_carving(mesh, - output_filename, - camera_path, - cubic_size, - voxel_resolution, - w=300, - h=300, - use_depth=True, - surface_method='pointcloud', - visualization=False): - - mesh.compute_vertex_normals() - camera_sphere = o3d.io.read_triangle_mesh(camera_path) - - voxel_carving = o3d.geometry.VoxelGrid.create_dense( - width=cubic_size, - height=cubic_size, - depth=cubic_size, - voxel_size=cubic_size / voxel_resolution, - origin=[-cubic_size / 2.0, -cubic_size / 2.0, -cubic_size / 2.0]) - - # rescale geometry - camera_sphere = preprocess(camera_sphere) - mesh = preprocess(mesh) - - vis = o3d.visualization.Visualizer() - vis.create_window(width=w, height=h, visible=False) - vis.add_geometry(mesh) - vis.get_render_option().mesh_show_back_face = True - - ctr = vis.get_view_control() - param = ctr.convert_to_pinhole_camera_parameters() - - pcd_agg = o3d.geometry.PointCloud() - centers_pts = np.zeros((len(camera_sphere.vertices), 3)) - i = 0 - for cid, xyz in enumerate(camera_sphere.vertices): - # get new camera pose - trans = get_extrinsic(xyz) - param.extrinsic = trans - c = np.linalg.inv(trans).dot(np.asarray([0, 0, 0, 1]).transpose()) - centers_pts[i, :] = c[:3] - i += 1 - ctr.convert_from_pinhole_camera_parameters(param) - - # capture depth image and make a point cloud - vis.poll_events() - vis.update_renderer() - depth = vis.capture_depth_float_buffer(False) - pcd_agg += o3d.geometry.PointCloud.create_from_depth_image( - o3d.geometry.Image(depth), - param.intrinsic, - param.extrinsic, - depth_scale=1) - - # depth map carving method - if use_depth: - 
voxel_carving.carve_depth_map(o3d.geometry.Image(depth), param) - else: - voxel_carving.carve_silhouette(o3d.geometry.Image(depth), param) - print("Carve view %03d/%03d" % (cid + 1, len(camera_sphere.vertices))) - - vis.destroy_window() - - print('Surface voxel grid from %s' % surface_method) - if surface_method == 'pointcloud': - voxel_surface = o3d.geometry.VoxelGrid.create_from_point_cloud_within_bounds( - pcd_agg, - voxel_size=cubic_size / voxel_resolution, - min_bound=(-cubic_size / 2, -cubic_size / 2, -cubic_size / 2), - max_bound=(cubic_size / 2, cubic_size / 2, cubic_size / 2)) - elif surface_method == 'mesh': - voxel_surface = o3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds( - mesh, - voxel_size=cubic_size / voxel_resolution, - min_bound=(-cubic_size / 2, -cubic_size / 2, -cubic_size / 2), - max_bound=(cubic_size / 2, cubic_size / 2, cubic_size / 2)) - else: - raise Exception('invalid surface method') - - voxel_carving_surface = voxel_surface + voxel_carving - - if (visualization): - print("visualize camera center") - centers = o3d.geometry.PointCloud() - centers.points = o3d.utility.Vector3dVector(centers_pts) - o3d.visualization.draw_geometries([centers, mesh]) - - print("surface voxels from %s" % surface_method) - print(voxel_surface) - o3d.visualization.draw_geometries([voxel_surface]) - - print("carved voxels") - print(voxel_carving) - o3d.visualization.draw_geometries([voxel_carving]) - - print("combined voxels (carved + surface from %s) together with mesh" % - surface_method) - print(voxel_carving_surface) - o3d.visualization.draw_geometries([voxel_carving_surface, mesh]) - - o3d.io.write_voxel_grid(output_filename, voxel_carving_surface) - - -if __name__ == '__main__': - mesh = meshes.armadillo() - - output_filename = os.path.abspath("../../TestData/voxelized.ply") - camera_path = os.path.abspath("../../TestData/sphere.ply") - - visualization = True - cubic_size = 2.0 - voxel_resolution = 128.0 - - voxel_carving(mesh, - 
output_filename, - camera_path, - cubic_size, - voxel_resolution, - visualization=True) diff --git a/examples/Python/Advanced/voxelization.ipynb b/examples/Python/Advanced/voxelization.ipynb new file mode 100644 index 00000000000..8488f3a1a90 --- /dev/null +++ b/examples/Python/Advanced/voxelization.ipynb @@ -0,0 +1,300 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import copy\n", + "import os\n", + "import sys\n", + "\n", + "# only needed for tutorial, monkey patches visualization\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Voxelization\n", + "Point clouds and triangle meshes are very flexible, but irregular geometry types. The voxel grid is another geometry type in 3D that is defined on a regular 3D grid, whereas a voxel can be thought of as the 3D counterpart to the pixel in 2D. Open3D has the geometry type `VoxelGrid` that can be used to work with voxel grids." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## From triangle mesh\n", + "Open3D provides the method `create_from_triangle_mesh` that creates a voxel grid from a triangle mesh. It returns a voxel grid where all voxels that are intersected by a triangle are set to `1`, all others are set to `0`. The argument `voxel_size` defines the resolution of the voxel grid." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('input')\n", + "mesh = o3dtut.get_bunny_mesh()\n", + "# fit to unit cube\n", + "mesh.scale(1 / np.max(mesh.get_max_bound() - mesh.get_min_bound()))\n", + "o3d.visualization.draw_geometries([mesh])\n", + "\n", + "print('voxelization')\n", + "voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh(mesh, \n", + " voxel_size=0.05)\n", + "o3d.visualization.draw_geometries([voxel_grid])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## From point cloud\n", + "The voxel grid can also be created from a point cloud using the method `create_from_point_cloud`. A voxel is occupied if at least one point of the point cloud is within the voxel. The color of the voxel is the average of all the points within the voxel. The argument `voxel_size` defines the resolution of the voxel grid." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('input')\n", + "N = 2000\n", + "pcd = o3dtut.get_armadillo_mesh().sample_points_poisson_disk(N)\n", + "# fit to unit cube\n", + "pcd.scale(1 / np.max(pcd.get_max_bound() - pcd.get_min_bound()))\n", + "pcd.colors = o3d.utility.Vector3dVector(np.random.uniform(0,1,size=(N,3)))\n", + "o3d.visualization.draw_geometries([pcd])\n", + "\n", + "print('voxelization')\n", + "voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, \n", + " voxel_size=0.05)\n", + "o3d.visualization.draw_geometries([voxel_grid])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inclusion test\n", + "The voxel grid can also be used to test if points are within an occupied voxel. The method `check_if_included` takes a `(n,3)` array as input and outputs an `bool` array." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "queries = np.asarray(pcd.points)\n", + "output = voxel_grid.check_if_included(o3d.utility.Vector3dVector(queries))\n", + "print(output[:10])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Voxel carving\n", + "The methods `create_from_point_cloud` and `create_from_triangle_mesh` create occupied voxels only on the surface of the geometry. It is however possible to carve a voxel grid from a number of depth maps or silhouettes. Open3D provides the methods `carve_depth_map` and `carve_silhouette` for voxel carving.\n", + "\n", + "The code below demostrates the usage by first rendering depthmaps from a geometry and using those depthmaps to carve a dense voxel grid. The result is a filled voxel grid of the given shape." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def xyz_spherical(xyz):\n", + " x = xyz[0]\n", + " y = xyz[1]\n", + " z = xyz[2]\n", + " r = np.sqrt(x * x + y * y + z * z)\n", + " r_x = np.arccos(y / r)\n", + " r_y = np.arctan2(z, x)\n", + " return [r, r_x, r_y]\n", + "\n", + "def get_rotation_matrix(r_x, r_y):\n", + " rot_x = np.asarray([[1, 0, 0], [0, np.cos(r_x), -np.sin(r_x)],\n", + " [0, np.sin(r_x), np.cos(r_x)]])\n", + " rot_y = np.asarray([[np.cos(r_y), 0, np.sin(r_y)], [0, 1, 0],\n", + " [-np.sin(r_y), 0, np.cos(r_y)]])\n", + " return rot_y.dot(rot_x)\n", + "\n", + "def get_extrinsic(xyz):\n", + " rvec = xyz_spherical(xyz)\n", + " r = get_rotation_matrix(rvec[1], rvec[2])\n", + " t = np.asarray([0, 0, 2]).transpose()\n", + " trans = np.eye(4)\n", + " trans[:3, :3] = r\n", + " trans[:3, 3] = t\n", + " return trans\n", + "\n", + "def preprocess(model):\n", + " min_bound = model.get_min_bound()\n", + " max_bound = model.get_max_bound()\n", + " center = min_bound + (max_bound - min_bound) / 2.0\n", + " scale = np.linalg.norm(max_bound - 
min_bound) / 2.0\n", + " vertices = np.asarray(model.vertices)\n", + " vertices -= center\n", + " model.vertices = o3d.utility.Vector3dVector(vertices / scale)\n", + " return model\n", + "\n", + "def voxel_carving(mesh,\n", + " output_filename,\n", + " camera_path,\n", + " cubic_size,\n", + " voxel_resolution,\n", + " w=300,\n", + " h=300,\n", + " use_depth=True,\n", + " surface_method='pointcloud'):\n", + " mesh.compute_vertex_normals()\n", + " camera_sphere = o3d.io.read_triangle_mesh(camera_path)\n", + "\n", + " # setup dense voxel grid\n", + " voxel_carving = o3d.geometry.VoxelGrid.create_dense(\n", + " width=cubic_size,\n", + " height=cubic_size,\n", + " depth=cubic_size,\n", + " voxel_size=cubic_size / voxel_resolution,\n", + " origin=[-cubic_size / 2.0, -cubic_size / 2.0, -cubic_size / 2.0])\n", + "\n", + " # rescale geometry\n", + " camera_sphere = preprocess(camera_sphere)\n", + " mesh = preprocess(mesh)\n", + "\n", + " # setup visualizer to render depthmaps\n", + " vis = o3d.visualization.Visualizer()\n", + " vis.create_window(width=w, height=h, visible=False)\n", + " vis.add_geometry(mesh)\n", + " vis.get_render_option().mesh_show_back_face = True\n", + " ctr = vis.get_view_control()\n", + " param = ctr.convert_to_pinhole_camera_parameters()\n", + "\n", + " # carve voxel grid\n", + " pcd_agg = o3d.geometry.PointCloud()\n", + " centers_pts = np.zeros((len(camera_sphere.vertices), 3))\n", + " for cid, xyz in enumerate(camera_sphere.vertices):\n", + " # get new camera pose\n", + " trans = get_extrinsic(xyz)\n", + " param.extrinsic = trans\n", + " c = np.linalg.inv(trans).dot(np.asarray([0, 0, 0, 1]).transpose())\n", + " centers_pts[cid, :] = c[:3]\n", + " ctr.convert_from_pinhole_camera_parameters(param)\n", + "\n", + " # capture depth image and make a point cloud\n", + " vis.poll_events()\n", + " vis.update_renderer()\n", + " depth = vis.capture_depth_float_buffer(False)\n", + " pcd_agg += o3d.geometry.PointCloud.create_from_depth_image(\n", + " 
o3d.geometry.Image(depth),\n", + " param.intrinsic,\n", + " param.extrinsic,\n", + " depth_scale=1)\n", + "\n", + " # depth map carving method\n", + " if use_depth:\n", + " voxel_carving.carve_depth_map(o3d.geometry.Image(depth), param)\n", + " else:\n", + " voxel_carving.carve_silhouette(o3d.geometry.Image(depth), param)\n", + " print(\"Carve view %03d/%03d\" % (cid + 1, len(camera_sphere.vertices)))\n", + " vis.destroy_window()\n", + "\n", + " # add voxel grid survace\n", + " print('Surface voxel grid from %s' % surface_method)\n", + " if surface_method == 'pointcloud':\n", + " voxel_surface = o3d.geometry.VoxelGrid.create_from_point_cloud_within_bounds(\n", + " pcd_agg,\n", + " voxel_size=cubic_size / voxel_resolution,\n", + " min_bound=(-cubic_size / 2, -cubic_size / 2, -cubic_size / 2),\n", + " max_bound=(cubic_size / 2, cubic_size / 2, cubic_size / 2))\n", + " elif surface_method == 'mesh':\n", + " voxel_surface = o3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds(\n", + " mesh,\n", + " voxel_size=cubic_size / voxel_resolution,\n", + " min_bound=(-cubic_size / 2, -cubic_size / 2, -cubic_size / 2),\n", + " max_bound=(cubic_size / 2, cubic_size / 2, cubic_size / 2))\n", + " else:\n", + " raise Exception('invalid surface method')\n", + " voxel_carving_surface = voxel_surface + voxel_carving\n", + " \n", + " return voxel_carving_surface, voxel_carving, voxel_surface" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3dtut.get_armadillo_mesh()\n", + "\n", + "output_filename = os.path.abspath(\"../../TestData/voxelized.ply\")\n", + "camera_path = os.path.abspath(\"../../TestData/sphere.ply\")\n", + "visualization = True\n", + "cubic_size = 2.0\n", + "voxel_resolution = 128.0\n", + "\n", + "voxel_grid, voxel_carving, voxel_surface = voxel_carving(\n", + " mesh, output_filename, camera_path,\n", + " cubic_size, voxel_resolution)" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"surface voxels\")\n", + "print(voxel_surface)\n", + "o3d.visualization.draw_geometries([voxel_surface])\n", + "\n", + "print(\"carved voxels\")\n", + "print(voxel_carving)\n", + "o3d.visualization.draw_geometries([voxel_carving])\n", + "\n", + "print(\"combined voxels (carved + surface)\")\n", + "print(voxel_grid)\n", + "o3d.visualization.draw_geometries([voxel_grid])" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/Python/Basic/bounding_volume.py b/examples/Python/Basic/bounding_volume.py deleted file mode 100644 index 28e22617fa6..00000000000 --- a/examples/Python/Basic/bounding_volume.py +++ /dev/null @@ -1,70 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/bounding_volume.py - -import numpy as np -import open3d as o3d -import os - -import sys - -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, "../Misc")) -import meshes - -np.random.seed(42) - - -def mesh_generator(): - mesh = o3d.geometry.TriangleMesh.create_box() - mesh.rotate(mesh.get_rotation_matrix_from_xyz((0.3, 0.5, 0.1))) - yield "rotated box mesh", mesh - yield "rotated box pcd", mesh.sample_points_uniformly(500) - - mesh = meshes.armadillo() - yield "armadillo mesh", mesh - yield "armadillo pcd", mesh.sample_points_uniformly(500) - - -if __name__ == "__main__": - for name, geom in mesh_generator(): - aabox = geom.get_axis_aligned_bounding_box() - 
print("%s has an axis aligned box volume of %f" % - (name, aabox.volume())) - obox = geom.get_oriented_bounding_box() - print("%s has an oriented box volume of %f" % (name, obox.volume())) - aabox.color = [1, 0, 0] - obox.color = [0, 1, 0] - o3d.visualization.draw_geometries([geom, aabox, obox]) - - mesh = meshes.armadillo() - - bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound=(-30, 0, -10), - max_bound=(10, 20, 10)) - o3d.visualization.draw_geometries([mesh, bbox]) - o3d.visualization.draw_geometries([mesh.crop(bbox), bbox]) - - bbox = o3d.geometry.OrientedBoundingBox( - center=(-10, 10, 0), - R=bbox.get_rotation_matrix_from_xyz((2, 1, 0)), - extent=(40, 20, 20), - ) - o3d.visualization.draw_geometries([mesh, bbox]) - o3d.visualization.draw_geometries([mesh.crop(bbox), bbox]) - - pcd = mesh.sample_points_uniformly(500000) - - bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound=(-30, 0, -10), - max_bound=(10, 20, 10)) - o3d.visualization.draw_geometries([pcd, bbox]) - o3d.visualization.draw_geometries([pcd.crop(bbox), bbox]) - - bbox = o3d.geometry.OrientedBoundingBox( - center=(-10, 10, 0), - R=bbox.get_rotation_matrix_from_xyz((2, 1, 0)), - extent=(40, 20, 20), - ) - o3d.visualization.draw_geometries([pcd, bbox]) - o3d.visualization.draw_geometries([pcd.crop(bbox), bbox]) diff --git a/examples/Python/Basic/clustering.py b/examples/Python/Basic/clustering.py deleted file mode 100644 index fb8bf497973..00000000000 --- a/examples/Python/Basic/clustering.py +++ /dev/null @@ -1,53 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/clustering.py - -import numpy as np -import open3d as o3d -import matplotlib.pyplot as plt - -np.random.seed(42) - - -def pointcloud_generator(): - yield "cube", o3d.geometry.TriangleMesh.create_sphere( - ).sample_points_uniformly(int(1e4)), 0.4 - - mesh = o3d.geometry.TriangleMesh.create_torus().scale(5) - mesh += 
o3d.geometry.TriangleMesh.create_torus() - yield "torus", mesh.sample_points_uniformly(int(1e4)), 0.75 - - d = 4 - mesh = o3d.geometry.TriangleMesh.create_tetrahedron().translate((-d, 0, 0)) - mesh += o3d.geometry.TriangleMesh.create_octahedron().translate((0, 0, 0)) - mesh += o3d.geometry.TriangleMesh.create_icosahedron().translate((d, 0, 0)) - mesh += o3d.geometry.TriangleMesh.create_torus().translate((-d, -d, 0)) - mesh += o3d.geometry.TriangleMesh.create_moebius(twists=1).translate( - (0, -d, 0)) - mesh += o3d.geometry.TriangleMesh.create_moebius(twists=2).translate( - (d, -d, 0)) - yield "shapes", mesh.sample_points_uniformly(int(1e5)), 0.5 - - yield "fragment", o3d.io.read_point_cloud( - "../../TestData/fragment.ply"), 0.02 - - -if __name__ == "__main__": - o3d.utility.set_verbosity_level(o3d.utility.Debug) - - cmap = plt.get_cmap("tab20") - for pcl_name, pcl, eps in pointcloud_generator(): - print("%s has %d points" % (pcl_name, np.asarray(pcl.points).shape[0])) - o3d.visualization.draw_geometries([pcl]) - - labels = np.array( - pcl.cluster_dbscan(eps=eps, min_points=10, print_progress=True)) - max_label = labels.max() - print("%s has %d clusters" % (pcl_name, max_label + 1)) - - colors = cmap(labels / (max_label if max_label > 0 else 1)) - colors[labels < 0] = 0 - pcl.colors = o3d.utility.Vector3dVector(colors[:, :3]) - o3d.visualization.draw_geometries([pcl]) diff --git a/examples/Python/Basic/convex_hull.py b/examples/Python/Basic/convex_hull.py deleted file mode 100644 index 609db0512d7..00000000000 --- a/examples/Python/Basic/convex_hull.py +++ /dev/null @@ -1,36 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/convex_hull.py - -import open3d as o3d - -import os -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def mesh_generator(): - yield 
o3d.geometry.TriangleMesh.create_box() - yield o3d.geometry.TriangleMesh.create_sphere() - yield meshes.knot() - yield meshes.bunny() - yield meshes.armadillo() - - -if __name__ == "__main__": - for mesh in mesh_generator(): - mesh.compute_vertex_normals() - hull, _ = mesh.compute_convex_hull() - hull_ls = o3d.geometry.LineSet.create_from_triangle_mesh(hull) - hull_ls.paint_uniform_color((1, 0, 0)) - o3d.visualization.draw_geometries([mesh, hull_ls]) - - pcl = mesh.sample_points_poisson_disk(number_of_points=2000) - hull, _ = pcl.compute_convex_hull() - hull_ls = o3d.geometry.LineSet.create_from_triangle_mesh(hull) - hull_ls.paint_uniform_color((1, 0, 0)) - o3d.visualization.draw_geometries([pcl, hull_ls]) diff --git a/examples/Python/Basic/file_io.ipynb b/examples/Python/Basic/file_io.ipynb new file mode 100644 index 00000000000..0eb7d5696ae --- /dev/null +++ b/examples/Python/Basic/file_io.ipynb @@ -0,0 +1,166 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# File IO\n", + "This tutorial shows how basic geometries are read and written by Open3D." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Point cloud\n", + "This code below reads and writes a point cloud." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Testing IO for point cloud ...\")\n", + "pcd = o3d.io.read_point_cloud(\"../../TestData/fragment.pcd\")\n", + "print(pcd)\n", + "o3d.io.write_point_cloud(\"copy_of_fragment.pcd\", pcd)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`print()` can be used for displaying a summary of pcd.\n", + "\n", + "By default, Open3D tries to infer the file type by the filename extension. Below is a list of supported point cloud file types.\n", + "\n", + "Format | Description\n", + "---------|---------------\n", + "`xyz` | Each line contains `[x, y, z]`, where `x`, `y`, `z` are the 3D coordinates\n", + "`xyzn` | Each line contains `[x, y, z, nx, ny, nz]`, where `nx`, `ny`, `nz` are the normals\n", + "`xyzrgb` | Each line contains `[x, y, z, r, g, b]`, where `r`, `g`, `b` are in floats of range `[0, 1]`\n", + "`pts` | The first line is an integer representing the number of points. Each subsequent line contains `[x, y, z, i, r, g, b]`, where `r`, `g`, `b` are in `uint8`\n", + "`ply` | See [Polygon File Format](http://paulbourke.net/dataformats/ply), the ply file can contain both point cloud and mesh data\n", + "`pcd` | See [Point Cloud Data](http://pointclouds.org/documentation/tutorials/pcd_file_format.php)\n", + "\n", + "It’s also possible to specify the file type explicitly. In this case, the file extension will be ignored." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.io.read_point_cloud(\"../../TestData/my_points.txt\", format='xyz')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mesh\n", + "This code below reads and writes a mesh." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Testing IO for meshes ...\")\n", + "mesh = o3d.io.read_triangle_mesh(\"../../TestData/knot.ply\")\n", + "print(mesh)\n", + "o3d.io.write_triangle_mesh(\"copy_of_knot.ply\", mesh)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Compared to the data structure of point cloud, mesh has triangles that define the 3D surface.\n", + "\n", + "By default, Open3D tries to infer the file type by the filename extension. Below is a list of supported triangle mesh file types.\n", + "\n", + "Format | Description\n", + "---------|---------------\n", + "`ply` | See [Polygon File Format](http://paulbourke.net/dataformats/ply/), the ply file can contain both point cloud and mesh data\n", + "`stl` | See [StereoLithography](http://www.fabbers.com/tech/STL_Format)\n", + "`obj` | See [Object Files](http://paulbourke.net/dataformats/obj/)\n", + "`off` | See [Object File Format](http://www.geomview.org/docs/html/OFF.html)\n", + "`gltf` | See [GL Transmission Format](https://github.com/KhronosGroup/glTF/tree/master/specification/2.0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Image\n", + "The code below reads and writes an image." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Testing IO for images ...\")\n", + "img = o3d.io.read_image(\"../../TestData/lena_color.jpg\")\n", + "print(img)\n", + "o3d.io.write_image(\"copy_of_lena_color.jpg\", img)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The size of the image is readily displayed using `print(img)`." 
+ ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/file_io.py b/examples/Python/Basic/file_io.py deleted file mode 100644 index c68d7ac2659..00000000000 --- a/examples/Python/Basic/file_io.py +++ /dev/null @@ -1,33 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/file_io.py - -import open3d as o3d - -if __name__ == "__main__": - - print("Testing IO for point cloud ...") - pcd = o3d.io.read_point_cloud("../../TestData/fragment.pcd") - print(pcd) - o3d.io.write_point_cloud("copy_of_fragment.pcd", pcd) - - print("Testing IO for meshes ...") - mesh = o3d.io.read_triangle_mesh("../../TestData/knot.ply") - print(mesh) - o3d.io.write_triangle_mesh("copy_of_knot.ply", mesh) - - print("Testing IO for textured meshes ...") - textured_mesh = o3d.io.read_triangle_mesh("../../TestData/crate/crate.obj") - print(textured_mesh) - o3d.io.write_triangle_mesh("copy_of_crate.obj", - textured_mesh, - write_triangle_uvs=True) - copy_textured_mesh = o3d.io.read_triangle_mesh('copy_of_crate.obj') - print(copy_textured_mesh) - - print("Testing IO for images ...") - img = o3d.io.read_image("../../TestData/lena_color.jpg") - print(img) - o3d.io.write_image("copy_of_lena_color.jpg", img) diff --git a/examples/Python/Basic/hidden_point_removal.py b/examples/Python/Basic/hidden_point_removal.py deleted file mode 100644 index 46967367742..00000000000 --- a/examples/Python/Basic/hidden_point_removal.py +++ /dev/null @@ -1,69 +0,0 @@ -# Open3D: www.open3d.org -# The MIT 
License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/hidden_point_removal.py - -import numpy as np -import open3d as o3d -import meshes - - -def mesh_generator(): - yield o3d.geometry.TriangleMesh.create_sphere() - yield meshes.knot() - yield meshes.bunny() - yield meshes.armadillo() - - -if __name__ == "__main__": - - for mesh in mesh_generator(): - - print("Convert mesh to a point cloud and estimate dimensions") - pcl = o3d.geometry.PointCloud() - pcl.points = mesh.vertices - pcl.colors = mesh.vertex_colors - diameter = np.linalg.norm( - np.asarray(pcl.get_max_bound()) - np.asarray(pcl.get_min_bound())) - - print("Define parameters used for hidden_point_removal") - camera = [diameter, diameter * 0.5, diameter * 0.5] - radius = diameter * 100 - - print("Create coordinate frame for visualizing the camera location") - camera_frame = o3d.geometry.TriangleMesh.create_coordinate_frame( - size=diameter / 5, origin=camera) - - print("Remove all hidden points viewed from the camera location") - hull, _ = pcl.hidden_point_removal(camera, radius) - - print("Visualize result") - hull_ls = o3d.geometry.LineSet.create_from_triangle_mesh(hull) - hull_ls.paint_uniform_color((1, 0, 0)) - pcl.paint_uniform_color((0.5, 0.5, 1)) - o3d.visualization.draw_geometries([pcl, hull_ls, camera_frame]) - - print("Create a point cloud representing a sphere") - mesh = o3d.geometry.TriangleMesh.create_sphere() - pcl = o3d.geometry.PointCloud() - pcl.points = mesh.vertices - - print("Assign colors based on their index (green to red)") - l = len(pcl.points) - colors = np.array( - [np.arange(0, l, 1) / l, - np.arange(l, 0, -1) / l, - np.zeros(l)]).transpose() - pcl.colors = o3d.utility.Vector3dVector(colors) - - print("Remove all hidden points viewed from the camera location") - mesh, pt_map = pcl.hidden_point_removal([4, 0, 0], 100) - - print("Add back colors using the point map") - mesh.vertex_colors = 
o3d.utility.Vector3dVector(colors[np.asarray(pt_map)]) - - print("Visualize the result") - mesh.compute_vertex_normals() - mesh.orient_triangles() - o3d.visualization.draw_geometries([mesh, pcl]) diff --git a/examples/Python/Basic/icp_registration.ipynb b/examples/Python/Basic/icp_registration.ipynb new file mode 100644 index 00000000000..711c394c6a6 --- /dev/null +++ b/examples/Python/Basic/icp_registration.ipynb @@ -0,0 +1,256 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import copy\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ICP Registration\n", + "This tutorial demonstrates the ICP (Iterative Closest Point) registration algorithm. It has been a mainstay of geometric registration in both research and industry for many years. The input are two point clouds and an initial transformation that roughly aligns the source point cloud to the target point cloud. The output is a refined transformation that tightly aligns the two point clouds. A helper function draw_registration_result visualizes the alignment during the registration process. In this tutorial, we show two ICP variants, the point-to-point ICP and the point-to-plane ICP [Rusinkiewicz2001]_." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Helper visualization function\n", + "The function below visualizes a target point cloud, and a source point cloud transformed with an alignment transformation. The target point cloud and the source point cloud are painted with cyan and yellow colors respectively. 
The more and tighter the two point clouds overlap with each other, the better the alignment result is." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def draw_registration_result(source, target, transformation):\n", + " source_temp = copy.deepcopy(source)\n", + " target_temp = copy.deepcopy(target)\n", + " source_temp.paint_uniform_color([1, 0.706, 0])\n", + " target_temp.paint_uniform_color([0, 0.651, 0.929])\n", + " source_temp.transform(transformation)\n", + " o3d.visualization.draw_geometries([source_temp, target_temp], \n", + " zoom=0.4459,\n", + " front=[0.9288, -0.2951, -0.2242],\n", + " lookat=[1.6784, 2.0612, 1.4451],\n", + " up=[-0.3402, -0.9189, -0.1996])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:** \n", + "\n", + "Since the functions `transform` and `paint_uniform_color` change the point cloud, we call `copy.deepcopy` to make copies and protect the original point clouds.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Input\n", + "This code below reads a source point cloud and a target point cloud from two files. A rough transformation is given.\n", + "\n", + "
\n", + " \n", + "**Note:** \n", + "\n", + "The initial alignment is usually obtained by a global registration algorithm. See [Global registration](../Advanced/global_registration.rst) for examples.\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "source = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_0.pcd\")\n", + "target = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_1.pcd\")\n", + "threshold = 0.02\n", + "trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],\n", + " [-0.139, 0.967, -0.215, 0.7],\n", + " [0.487, 0.255, 0.835, -1.4], \n", + " [0.0, 0.0, 0.0, 1.0]])\n", + "draw_registration_result(source, target, trans_init)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The function `evaluate_registration` calculates two main metrics. `fitness` measures the overlapping area (# of inlier correspondences / # of points in target). Higher the better. `inlier_rmse` measures the RMSE of all inlier correspondences. The lower the better." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Initial alignment\")\n", + "evaluation = o3d.registration.evaluate_registration(source, target, threshold, trans_init)\n", + "print(evaluation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Point-to-point ICP\n", + "In general, the ICP algorithm iterates over two steps:\n", + "\n", + "1. 
Find correspondence set $\\mathcal{K}=\\{(\\mathbf{p}, \\mathbf{q})\\}$ from target point cloud $\\mathbf{P}$, and source point cloud $\\mathbf{Q}$ transformed with current transformation matrix $\\mathbf{T}$.\n", + "\n", + "Update the transformation $\\mathbf{T}$ by minimizing an objective function $E(\\mathbf{T})$ defined over the correspondence set $\\mathcal{K}$.\n", + "\n", + "Different variants of ICP use different objective functions $E(\\mathbf{T})$ [\\[BeslAndMcKay1992\\]](../reference.html#beslandmckay1992) [\\[ChenAndMedioni1992\\]](../reference.html#chenandmedioni1992) [\\[Park2017\\]](../reference.html#park2017).\n", + "\n", + "We first show a point-to-point ICP algorithm [\\[BeslAndMcKay1992\\]](../reference.html#beslandmckay1992) using an objective\n", + "\n", + "\\begin{equation}\n", + "E(\\mathbf{T}) = \\sum_{(\\mathbf{p},\\mathbf{q})\\in\\mathcal{K}}\\|\\mathbf{p} - \\mathbf{T}\\mathbf{q}\\|^{2}\n", + "\\end{equation}\n", + "\n", + "Class `TransformationEstimationPointToPoint` provides functions to compute the residuals and Jacobian matrices of the point-to-point ICP objective. Function `registration_icp` takes it as a parameter and runs point-to-point ICP to obtain results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Apply point-to-point ICP\")\n", + "reg_p2p = o3d.registration.registration_icp(\n", + " source, target, threshold, trans_init,\n", + " o3d.registration.TransformationEstimationPointToPoint())\n", + "print(reg_p2p)\n", + "print(\"Transformation is:\")\n", + "print(reg_p2p.transformation)\n", + "draw_registration_result(source, target, reg_p2p.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `fitness` score increases from 0.174723 to 0.372450. The `inlier_rmse` reduces from 0.011771 to 0.007760. By default, `registration_icp` runs until convergence or reaches a maximum number of iterations (30 by default). 
It can be changed to allow more computation time and to improve the results further." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reg_p2p = o3d.registration.registration_icp(source, target, threshold, trans_init,\n", + " o3d.registration.TransformationEstimationPointToPoint(),\n", + " o3d.registration.ICPConvergenceCriteria(max_iteration = 2000))\n", + "print(reg_p2p)\n", + "print(\"Transformation is:\")\n", + "print(reg_p2p.transformation)\n", + "draw_registration_result(source, target, reg_p2p.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The final alignment is tight. The `fitness` score improves to 0.621123. The `inlier_rmse` reduces to 0.006583." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Point-to-plane ICP\n", + "The point-to-plane ICP algorithm [\\[ChenAndMedioni1992\\]](../reference.html#chenandmedioni1992) uses a different objective function\n", + "\n", + "\\begin{equation}\n", + "E(\\mathbf{T}) = \\sum_{(\\mathbf{p},\\mathbf{q})\\in\\mathcal{K}}\\big((\\mathbf{p} - \\mathbf{T}\\mathbf{q})\\cdot\\mathbf{n}_{\\mathbf{p}}\\big)^{2},\n", + "\\end{equation}\n", + "\n", + "where $\\mathbf{n}_{\\mathbf{p}}$ is the normal of point $\\mathbf{p}$. [\\[Rusinkiewicz2001\\]](../reference.html#rusinkiewicz2001) has shown that the point-to-plane ICP algorithm has a faster convergence speed than the point-to-point ICP algorithm.\n", + "\n", + "`registration_icp` is called with a different parameter `TransformationEstimationPointToPlane`. Internally, this class implements functions to compute the residuals and Jacobian matrices of the point-to-plane ICP objective." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Apply point-to-plane ICP\")\n", + "reg_p2l = o3d.registration.registration_icp(\n", + " source, target, threshold, trans_init,\n", + " o3d.registration.TransformationEstimationPointToPlane())\n", + "print(reg_p2l)\n", + "print(\"Transformation is:\")\n", + "print(reg_p2l.transformation)\n", + "draw_registration_result(source, target, reg_p2l.transformation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The point-to-plane ICP reaches tight alignment within 30 iterations (`fitness` 0.620972 and `inlier_rmse` 0.006581).\n", + "\n", + "
\n", + " \n", + "The point-to-plane ICP algorithm uses point normals. In this tutorial, we load normals from files. If normals are not given, they can be computed with [Vertex normal estimation](pointcloud.ipynb#vertex-normal-estimation).\n", + "\n", + "
" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/icp_registration.py b/examples/Python/Basic/icp_registration.py deleted file mode 100644 index 0382a01ddfc..00000000000 --- a/examples/Python/Basic/icp_registration.py +++ /dev/null @@ -1,52 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/icp_registration.py - -import open3d as o3d -import numpy as np -import copy - - -def draw_registration_result(source, target, transformation): - source_temp = copy.deepcopy(source) - target_temp = copy.deepcopy(target) - source_temp.paint_uniform_color([1, 0.706, 0]) - target_temp.paint_uniform_color([0, 0.651, 0.929]) - source_temp.transform(transformation) - o3d.visualization.draw_geometries([source_temp, target_temp]) - - -if __name__ == "__main__": - source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") - threshold = 0.02 - trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5], - [-0.139, 0.967, -0.215, 0.7], - [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]]) - draw_registration_result(source, target, trans_init) - print("Initial alignment") - evaluation = o3d.registration.evaluate_registration(source, target, - threshold, trans_init) - print(evaluation) - - print("Apply point-to-point ICP") - reg_p2p = o3d.registration.registration_icp( - source, target, threshold, trans_init, - o3d.registration.TransformationEstimationPointToPoint()) - print(reg_p2p) - 
print("Transformation is:") - print(reg_p2p.transformation) - print("") - draw_registration_result(source, target, reg_p2p.transformation) - - print("Apply point-to-plane ICP") - reg_p2l = o3d.registration.registration_icp( - source, target, threshold, trans_init, - o3d.registration.TransformationEstimationPointToPlane()) - print(reg_p2l) - print("Transformation is:") - print(reg_p2l.transformation) - print("") - draw_registration_result(source, target, reg_p2l.transformation) diff --git a/examples/Python/Basic/jupyter.ipynb b/examples/Python/Basic/jupyter.ipynb new file mode 100644 index 00000000000..4396e774f90 --- /dev/null +++ b/examples/Python/Basic/jupyter.ipynb @@ -0,0 +1,72 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Jupyter Visualization\n", + "Since version 0.4.0, we added experimental support for Jupyter visualization with WebGL. If Open3D is installed from pip or conda repository, Jupyter supported is enabled by default. If Open3D is compiled from source, please refer to Setup Python binding environments on how to [build Open3D with Jupyter visualization support](../../compilation.rst#jupyter-visualization-widgets-support-experimental).\n", + "\n", + "Note that Jupyter Visualization is still at an early experimental stage. Here are the main limitations:\n", + "\n", + "1. Only point cloud geometry is supported.\n", + "2. Camera is initialized with fixed parameters, therefore, the initial view may not be optimal for the point cloud.\n", + "3. Performance is not optimized.\n", + "\n", + "## Controls\n", + "\n", + "- Mouse wheel: zoom in/out\n", + "- Left mouse button drag: rotate axis\n", + "- Right mouse button drag: panning\n", + "\n", + "## Example usage\n", + "Jupyter visualizer is defined in the `JVisualizer` class. Initialize the class, call `add_geometry` to add an Open3D geometry, and then call the `show` to display the Jupyter widgets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import open3d as o3\n", + "from open3d import JVisualizer\n", + "\n", + "pts_path = \"examples/TestData/fragment.ply\"\n", + "fragment = o3.read_point_cloud(pts_path)\n", + "visualizer = JVisualizer()\n", + "visualizer.add_geometry(fragment)\n", + "visualizer.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the point cloud is not visible (due to the fixed camera initialization), try first zooming in/out with mouse wheel, and dragging right button of the mouse to pan." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/Python/Basic/kdtree.ipynb b/examples/Python/Basic/kdtree.ipynb new file mode 100644 index 00000000000..1c94223b6d5 --- /dev/null +++ b/examples/Python/Basic/kdtree.ipynb @@ -0,0 +1,156 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# KDTree\n", + "Open3D uses [FLANN](https://www.cs.ubc.ca/research/flann/) to build KDTrees for fast retrieval of nearest neighbors." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Build KDTree from point cloud\n", + "The code below reads a point cloud and builds a KDTree. This is a preprocessing step for the following nearest neighbor queries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Testing kdtree in open3d ...\")\n", + "print(\"Load a point cloud and paint it gray.\")\n", + "pcd = o3d.io.read_point_cloud(\"../../TestData/Feature/cloud_bin_0.pcd\")\n", + "pcd.paint_uniform_color([0.5, 0.5, 0.5])\n", + "pcd_tree = o3d.geometry.KDTreeFlann(pcd)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Find neighboring points\n", + "We pick the 1500-th point as the anchor point and paint it red." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Paint the 1500th point red.\")\n", + "pcd.colors[1500] = [1, 0, 0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Using search_knn_vector_3d\n", + "The function `search_knn_vector_3d` returns a list of indices of the k nearest neighbors of the anchor point. These neighboring points are painted with blue color. Note that we convert `pcd.colors` to a numpy array to make batch access to the point colors, and broadcast a blue color [0, 0, 1] to all the selected points. We skip the first index since it is the anchor point itself." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Find its 200 nearest neighbors, paint blue.\")\n", + "[k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 200)\n", + "np.asarray(pcd.colors)[idx[1:], :] = [0, 0, 1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Using search_radius_vector_3d\n", + "Similarly, we can use `search_radius_vector_3d` to query all points with distances to the anchor point less than a given radius. We paint these points with green color." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Find its neighbors with distance less than 0.2, paint green.\")\n", + "[k, idx, _] = pcd_tree.search_radius_vector_3d(pcd.points[1500], 0.2)\n", + "np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Visualize the point cloud.\")\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.5599,\n", + " front=[-0.4958, 0.8229, 0.2773],\n", + " lookat=[2.1126, 1.0163, -1.8543],\n", + " up=[0.1007, -0.2626, 0.9596])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:** \n", + "\n", + "Besides the KNN search `search_knn_vector_3d` and the RNN search `search_radius_vector_3d`, Open3D provides a hybrid search function `search_hybrid_vector_3d`. It returns at most k nearest neighbors that have distances to the anchor point less than a given radius. This function combines the criteria of KNN search and RNN search. It is known as RKNN search in some literatures. It has performance benefits in many practical cases, and is heavily used in a number of Open3D functions.\n", + "\n", + "
" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/kdtree.py b/examples/Python/Basic/kdtree.py deleted file mode 100644 index 18de48f0055..00000000000 --- a/examples/Python/Basic/kdtree.py +++ /dev/null @@ -1,31 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/kdtree.py - -import numpy as np -import open3d as o3d - -if __name__ == "__main__": - - print("Testing kdtree in open3d ...") - print("Load a point cloud and paint it gray.") - pcd = o3d.io.read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd") - pcd.paint_uniform_color([0.5, 0.5, 0.5]) - pcd_tree = o3d.geometry.KDTreeFlann(pcd) - - print("Paint the 1500th point red.") - pcd.colors[1500] = [1, 0, 0] - - print("Find its 200 nearest neighbors, paint blue.") - [k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 200) - np.asarray(pcd.colors)[idx[1:], :] = [0, 0, 1] - - print("Find its neighbors with distance less than 0.2, paint green.") - [k, idx, _] = pcd_tree.search_radius_vector_3d(pcd.points[1500], 0.2) - np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 0] - - print("Visualize the point cloud.") - o3d.visualization.draw_geometries([pcd]) - print("") diff --git a/examples/Python/Basic/mesh.ipynb b/examples/Python/Basic/mesh.ipynb new file mode 100644 index 00000000000..9a0cbab47dd --- /dev/null +++ b/examples/Python/Basic/mesh.ipynb @@ -0,0 +1,641 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + 
"outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import copy\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Mesh\n", + "Open3D has a data structure for 3D triangle meshes called `TriangleMesh`.\n", + "The code below shows how to read a triangle mesh from a `ply` and prints its vertices and triangles." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Testing mesh in open3d ...\")\n", + "mesh = o3dtut.get_knot_mesh()\n", + "print(mesh)\n", + "print('Vertices:')\n", + "print(np.asarray(mesh.vertices))\n", + "print('Triangles:')\n", + "print(np.asarray(mesh.triangles))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `TriangleMesh` class has a few data fields such as `vertices` and `triangles`. Open3D provides direct memory access to these fields via numpy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize a 3D mesh" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Try to render a mesh with normals (exist: \" +\n", + " str(mesh.has_vertex_normals()) + \") and colors (exist: \" +\n", + " str(mesh.has_vertex_colors()) + \")\")\n", + "o3d.visualization.draw_geometries([mesh])\n", + "print(\"A mesh with no normals and no colors does not look good.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can rotate and move the mesh but it is painted with uniform gray color and does not look “3d”. The reason is that the current mesh does not have normals for vertices or faces. 
So uniform color shading is used instead of a more sophisticated Phong shading." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Surface normal estimation\n", + "Let’s draw the mesh with surface normals." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Computing normal and rendering it.\")\n", + "mesh.compute_vertex_normals()\n", + "print(np.asarray(mesh.triangle_normals))\n", + "o3d.visualization.draw_geometries([mesh])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It uses `compute_vertex_normals` and `paint_uniform_color` which are member functions of `mesh`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Crop mesh\n", + "We remove half of the surface by directly operating on the `triangles` and `triangle_normals` data fields of the mesh. This is done via numpy." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"We make a partial mesh of only the first half triangles.\")\n", + "mesh1 = copy.deepcopy(mesh)\n", + "mesh1.triangles = o3d.utility.Vector3iVector(\n", + " np.asarray(mesh1.triangles)[:len(mesh1.triangles) // 2, :])\n", + "mesh1.triangle_normals = o3d.utility.Vector3dVector(\n", + " np.asarray(mesh1.triangle_normals)[:len(mesh1.triangle_normals) // 2, :])\n", + "print(mesh1.triangles)\n", + "o3d.visualization.draw_geometries([mesh1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Paint mesh\n", + "Painting mesh is the same as how it worked for point cloud."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Painting the mesh\")\n", + "mesh1.paint_uniform_color([1, 0.706, 0])\n", + "o3d.visualization.draw_geometries([mesh1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mesh properties\n", + "A triangle mesh has several properties that can be tested with Open3D. One important property is the manifold property, where we can test the triangle mesh if it is edge manifold `is_edge_manifold` and if it is `is_vertex_manifold`. A triangle mesh is edge manifold, if each edge is bounding either one or two triangles. The function `is_edge_manifold` has the `bool` parameter `allow_boundary_edges` that defines if boundary edges should be allowed. Further, a triangle mesh is vertex manifold if the star of the vertex is edge‐manifold and edge‐connected, e.g., two or more faces connected only by a vertex and not by an edge.\n", + "\n", + "Another property is the test of self-intersection. The function `is_self_intersecting` returns `True` if there exists a triangle in the mesh that is intersecting another mesh. A watertight mesh can be defined as a mesh that is edge manifold, vertex manifold and not self intersecting. The function `is_watertight` implements this check in Open3D.\n", + "\n", + "We also can test the triangle mesh, if it is orientable, i.e. the triangles can be oriented in such a way that all normals point towards the outside. The corresponding function in Open3D is called `is_orientable`.\n", + "\n", + "The code below test a number of triangle meshes against those properties and visualizes the results. Non-manifold edges are shown in red, boundary edges in green, non-manifold vertices are visualized as green points, and self-intersecting triangles are shown in pink. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def check_properties(name, mesh):\n", + " mesh.compute_vertex_normals()\n", + " \n", + " edge_manifold = mesh.is_edge_manifold(allow_boundary_edges=True)\n", + " edge_manifold_boundary = mesh.is_edge_manifold(allow_boundary_edges=False)\n", + " vertex_manifold = mesh.is_vertex_manifold()\n", + " self_intersecting = mesh.is_self_intersecting()\n", + " watertight = mesh.is_watertight()\n", + " orientable = mesh.is_orientable()\n", + " \n", + " print(name)\n", + " print(f\" edge_manifold: {edge_manifold}\")\n", + " print(f\" edge_manifold_boundary: {edge_manifold_boundary}\")\n", + " print(f\" vertex_manifold: {vertex_manifold}\")\n", + " print(f\" self_intersecting: {self_intersecting}\")\n", + " print(f\" watertight: {watertight}\")\n", + " print(f\" orientable: {orientable}\")\n", + "\n", + " geoms = [mesh]\n", + " if not edge_manifold:\n", + " edges = mesh.get_non_manifold_edges(allow_boundary_edges=True)\n", + " geoms.append(o3dtut.edges_to_lineset(mesh, edges, (1, 0, 0)))\n", + " if not edge_manifold_boundary:\n", + " edges = mesh.get_non_manifold_edges(allow_boundary_edges=False)\n", + " geoms.append(o3dtut.edges_to_lineset(mesh, edges, (0, 1, 0)))\n", + " if not vertex_manifold:\n", + " verts = np.asarray(mesh.get_non_manifold_vertices())\n", + " pcl = o3d.geometry.PointCloud(\n", + " points=o3d.utility.Vector3dVector(np.asarray(mesh.vertices)[verts]))\n", + " pcl.paint_uniform_color((0, 0, 1))\n", + " geoms.append(pcl)\n", + " if self_intersecting:\n", + " intersecting_triangles = np.asarray(\n", + " mesh.get_self_intersecting_triangles())\n", + " intersecting_triangles = intersecting_triangles[0:1]\n", + " intersecting_triangles = np.unique(intersecting_triangles)\n", + " print(\" # visualize self-intersecting triangles\")\n", + " triangles = np.asarray(mesh.triangles)[intersecting_triangles]\n", + " edges = [\n", + " np.vstack((triangles[:, 
i], triangles[:, j]))\n", + " for i, j in [(0, 1), (1, 2), (2, 0)]\n", + " ]\n", + " edges = np.hstack(edges).T\n", + " edges = o3d.utility.Vector2iVector(edges)\n", + " geoms.append(o3dtut.edges_to_lineset(mesh, edges, (1, 0, 1)))\n", + " o3d.visualization.draw_geometries(geoms, mesh_show_back_face=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "check_properties('Knot', o3dtut.get_knot_mesh())\n", + "check_properties('Moebius', o3d.geometry.TriangleMesh.create_moebius(twists=1))\n", + "check_properties(\"non-manifold edge\", o3dtut.get_non_manifold_edge_mesh())\n", + "check_properties(\"non-manifold vertex\", o3dtut.get_non_manifold_vertex_mesh())\n", + "check_properties(\"open box\", o3dtut.get_open_box_mesh())\n", + "check_properties(\"intersecting_boxes\", o3dtut.get_intersecting_boxes_mesh())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mesh filtering\n", + "Open3D contains a number of methods to filter meshes. In the following we show the implemented filters to smooth noisy triangle meshes." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Average filter\n", + "The simplest filter is the average filter. A given vertex $v_i$ is given by the average of the adjacent vertices $\\mathcal{N}$\n", + "\n", + "\\begin{equation}\n", + "v_i = \\frac{v_i + \\sum_{n \\in \\mathcal{N}} v_n}{|N| + 1} \\,.\n", + "\\end{equation}\n", + "\n", + "This filter can be used to denoise meshes as demonstrated in the code below. The parameter `number_of_iterations` in the function `filter_smooth_simple` defines the how often the filter is applied to the mesh." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('create noisy mesh')\n", + "mesh_in = o3dtut.get_knot_mesh()\n", + "vertices = np.asarray(mesh_in.vertices)\n", + "noise = 5\n", + "vertices += np.random.uniform(0, noise, size=vertices.shape)\n", + "mesh_in.vertices = o3d.utility.Vector3dVector(vertices)\n", + "mesh_in.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_in])\n", + "\n", + "print('filter with average with 1 iteration') \n", + "mesh_out = mesh_in.filter_smooth_simple(number_of_iterations=1)\n", + "mesh_out.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_out])\n", + "\n", + "print('filter with average with 5 iterations') \n", + "mesh_out = mesh_in.filter_smooth_simple(number_of_iterations=5)\n", + "mesh_out.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_out])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Laplacian\n", + "Another important mesh filter is the Laplacian defined as \n", + "\n", + "\\begin{equation}\n", + "v_i = v_i \\cdot \\lambda \\sum_{n \\in N} w_n v_n - v_i \\,,\n", + "\\end{equation}\n", + "\n", + "where $\\lambda$ is the strenght of the filter and $w_n$ are normalized weights that relate to the distance of the neighbouring vertices. The filter is implemented in `filter_smooth_laplacian` and has the parameters `number_of_iterations` and `lambda`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('filter with Laplacian with 10 iterations') \n", + "mesh_out = mesh_in.filter_smooth_laplacian(number_of_iterations=10)\n", + "mesh_out.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_out])\n", + "\n", + "print('filter with Laplacian with 50 iterations') \n", + "mesh_out = mesh_in.filter_smooth_laplacian(number_of_iterations=50)\n", + "mesh_out.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_out])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Taubin filter\n", + "The problem with the average and Laplacian filter is that they lead to a shrinkage of the triangle mesh. [\\[Taubin1995\\]](../reference.html#Taubin1995) showed that the application of two Laplacian filters with different $\\lambda$ parameters can prevent the mesh shrinkage. The filter is implemented in `filter_smooth_taubin`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('filter with Taubin with 10 iterations') \n", + "mesh_out = mesh_in.filter_smooth_taubin(number_of_iterations=10)\n", + "mesh_out.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_out])\n", + "\n", + "print('filter with Taubin with 100 iterations') \n", + "mesh_out = mesh_in.filter_smooth_taubin(number_of_iterations=100)\n", + "mesh_out.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh_out])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Sampling\n", + "Open3D includes functions to sample point clouds from a triangle mesh. The simplest method is `sample_points_uniformly` that uniformly samples points from the 3D surface based on the triangle area. The parameter `number_of_points` defines how many points are sampled from the triangle surface." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_sphere()\n", + "mesh.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh])\n", + "pcd = mesh.sample_points_uniformly(number_of_points=500)\n", + "o3d.visualization.draw_geometries([pcd])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3dtut.get_bunny_mesh()\n", + "mesh.compute_vertex_normals()\n", + "o3d.visualization.draw_geometries([mesh])\n", + "pcd = mesh.sample_points_uniformly(number_of_points=500)\n", + "o3d.visualization.draw_geometries([pcd])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Uniform sampling can yield clusters of points on the surface. Poisson disk sampling can evenly distributes the points on the surface. The method `sample_points_poisson_disk` implements sample elimination. Therefore, the method starts with a sampled point cloud and removes point to satisfy the sampling criterion. The method supports two options to provide the initial point cloud')\n", + "\n", + "1. Default via the parameter `init_factor`: The method first samples uniformly a point cloud from the mesh with `init_factor` x `number_of_points` and uses this for the elimination\n", + "2. One can provide a point cloud and pass it to the `sample_points_poisson_disk` method. Then, this point cloud is used for elimination." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_sphere()\n", + "pcd = mesh.sample_points_poisson_disk(number_of_points=500, init_factor=5)\n", + "o3d.visualization.draw_geometries([pcd])\n", + "\n", + "pcd = mesh.sample_points_uniformly(number_of_points=2500)\n", + "pcd = mesh.sample_points_poisson_disk(number_of_points=500, pcl=pcd)\n", + "o3d.visualization.draw_geometries([pcd])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3dtut.get_bunny_mesh()\n", + "pcd = mesh.sample_points_poisson_disk(number_of_points=500, init_factor=5)\n", + "o3d.visualization.draw_geometries([pcd])\n", + "\n", + "pcd = mesh.sample_points_uniformly(number_of_points=2500)\n", + "pcd = mesh.sample_points_poisson_disk(number_of_points=500, pcl=pcd)\n", + "o3d.visualization.draw_geometries([pcd])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mesh subdivision\n", + "In mesh subdivision we divide each triangles into a number of smaller triangles. In the simplest case, we compute the midpoint of each side per triangle and divide the triangle into four smaller triangles. This is implemented in the `subdivide_midpoint` function. The 3D surface and area stays the same, but the number of vertices and triangles increases. The parameter `number_of_iterations` defines how often this process is repeated." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_box()\n", + "mesh.compute_vertex_normals()\n", + "print(f'The mesh has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)\n", + "mesh = mesh.subdivide_midpoint(number_of_iterations=1)\n", + "print(f'After subdivision it has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Open3D implements an additional subdivision method based on [\\[Loop1987\\]](../reference.html#Loop1987). The method is based on a quartic box spline, which generate $C^2$ continuous limit surfaces everywhere except at extraordinary vertices where they are $C^1$ continuous. This leads to smoother corners." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_sphere()\n", + "mesh.compute_vertex_normals()\n", + "print(f'The mesh has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)\n", + "mesh = mesh.subdivide_loop(number_of_iterations=2)\n", + "print(f'After subdivision it has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3dtut.get_knot_mesh()\n", + "mesh.compute_vertex_normals()\n", + "print(f'The mesh has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)\n", + "mesh = mesh.subdivide_loop(number_of_iterations=1)\n", + "print(f'After subdivision it has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mesh simplification\n", + "Sometimes we want to represent a high-resolution mesh with fewer triangles and vertices, but the low-resolution mesh should still be close to the high-resolution mesh. For this purpose Open3D implements a number of mesh simplification methods." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Vertex clustering\n", + "The vertex clustering method pools all vertices that fall into a voxel of a given size to a single vertex. 
The method is implemented in `simplify_vertex_clustering` and has as parameters `voxel_size` that defines the size of the voxel grid and `contraction` that defines how the vertices are pooled. `o3d.geometry.SimplificationContraction.Average` computes a simple average." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh_in = o3dtut.get_bunny_mesh()\n", + "print(f'Input mesh has {len(mesh_in.vertices)} vertices and {len(mesh_in.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh_in])\n", + "\n", + "voxel_size = max(mesh_in.get_max_bound() - mesh_in.get_min_bound()) / 32\n", + "print(f'voxel_size = {voxel_size:e}')\n", + "mesh_smp = mesh_in.simplify_vertex_clustering(\n", + " voxel_size=voxel_size,\n", + " contraction=o3d.geometry.SimplificationContraction.Average)\n", + "print(f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh_smp])\n", + "\n", + "voxel_size = max(mesh_in.get_max_bound() - mesh_in.get_min_bound()) / 16\n", + "print(f'voxel_size = {voxel_size:e}')\n", + "mesh_smp = mesh_in.simplify_vertex_clustering(\n", + " voxel_size=voxel_size,\n", + " contraction=o3d.geometry.SimplificationContraction.Average)\n", + "print(f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh_smp])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Mesh decimation\n", + "Another category of mesh simplification methods is mesh decimation that operates in incremental steps. We select a single triangle that minimizes an error metric and remove it. This is repeated until a required number of triangles is achieved. Open3D implements `simplify_quadric_decimation` that minimizes error quadrics (distances to neighbouring planes). 
The parameter `target_number_of_triangles` defines the stopping criteria of the decimation algorithm." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh_smp = mesh_in.simplify_quadric_decimation(\n", + " target_number_of_triangles=6500)\n", + "print(f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh_smp])\n", + "\n", + "mesh_smp = mesh_in.simplify_quadric_decimation(\n", + " target_number_of_triangles=1700)\n", + "print(f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles')\n", + "o3d.visualization.draw_geometries([mesh_smp])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Connected components\n", + "The result of various reconstruction methods (e.g., [RGBD Integration](../Advanced/rgbd_integration.ipynb)) is not always a single triangle mesh, but a number of meshes. Some of the smaller parts are due to noise and we most likely want to remove them. Open3D implements a connected components algorithm `cluster_connected_triangles` that assigns each triangle to a cluster of connected triangles. It returns for each triangle the index of the cluster in `triangle_clusters`, and per cluster the number of triangles in `cluster_n_triangles` and the surface area of the cluster in `cluster_area`.\n", + "\n", + "The code below shows the application of `cluster_connected_triangles` and how it can be used to remove spurious triangles."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Generate data\")\n", + "mesh = o3dtut.get_bunny_mesh().subdivide_midpoint(number_of_iterations=2)\n", + "vert = np.asarray(mesh.vertices)\n", + "min_vert, max_vert = vert.min(axis=0), vert.max(axis=0)\n", + "for _ in range(30):\n", + " cube = o3d.geometry.TriangleMesh.create_box()\n", + " cube.scale(0.005)\n", + " cube.translate(\n", + " (\n", + " np.random.uniform(min_vert[0], max_vert[0]),\n", + " np.random.uniform(min_vert[1], max_vert[1]),\n", + " np.random.uniform(min_vert[2], max_vert[2]),\n", + " ),\n", + " relative=False,\n", + " )\n", + " mesh += cube\n", + "mesh.compute_vertex_normals()\n", + "print(\"Show input mesh\")\n", + "o3d.visualization.draw_geometries([mesh])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Cluster connected triangles\")\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " triangle_clusters, cluster_n_triangles, cluster_area = (\n", + " mesh.cluster_connected_triangles())\n", + "triangle_clusters = np.asarray(triangle_clusters)\n", + "cluster_n_triangles = np.asarray(cluster_n_triangles)\n", + "cluster_area = np.asarray(cluster_area)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Show mesh with small clusters removed\")\n", + "mesh_0 = copy.deepcopy(mesh)\n", + "triangles_to_remove = cluster_n_triangles[triangle_clusters] < 100\n", + "mesh_0.remove_triangles_by_mask(triangles_to_remove)\n", + "o3d.visualization.draw_geometries([mesh_0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Show largest cluster\")\n", + "mesh_1 = copy.deepcopy(mesh)\n", + "largest_cluster_idx = cluster_n_triangles.argmax()\n", + "triangles_to_remove = 
triangle_clusters != largest_cluster_idx\n", + "mesh_1.remove_triangles_by_mask(triangles_to_remove)\n", + "o3d.visualization.draw_geometries([mesh_1])" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/mesh.py b/examples/Python/Basic/mesh.py deleted file mode 100644 index 6ec7ecea54e..00000000000 --- a/examples/Python/Basic/mesh.py +++ /dev/null @@ -1,43 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh.py - -import copy -import numpy as np -import open3d as o3d - -if __name__ == "__main__": - - print("Testing mesh in open3d ...") - mesh = o3d.io.read_triangle_mesh("../../TestData/knot.ply") - print(mesh) - print(np.asarray(mesh.vertices)) - print(np.asarray(mesh.triangles)) - print("") - - print("Try to render a mesh with normals (exist: " + - str(mesh.has_vertex_normals()) + ") and colors (exist: " + - str(mesh.has_vertex_colors()) + ")") - o3d.visualization.draw_geometries([mesh]) - print("A mesh with no normals and no colors does not seem good.") - - print("Computing normal and rendering it.") - mesh.compute_vertex_normals() - print(np.asarray(mesh.triangle_normals)) - o3d.visualization.draw_geometries([mesh]) - - print("We make a partial mesh of only the first half triangles.") - mesh1 = copy.deepcopy(mesh) - mesh1.triangles = o3d.utility.Vector3iVector( - np.asarray(mesh1.triangles)[:len(mesh1.triangles) // 2, :]) - mesh1.triangle_normals = o3d.utility.Vector3dVector( - 
np.asarray(mesh1.triangle_normals)[:len(mesh1.triangle_normals) // - 2, :]) - print(mesh1.triangles) - o3d.visualization.draw_geometries([mesh1]) - - print("Painting the mesh") - mesh1.paint_uniform_color([1, 0.706, 0]) - o3d.visualization.draw_geometries([mesh1]) diff --git a/examples/Python/Basic/mesh_connected_components.py b/examples/Python/Basic/mesh_connected_components.py deleted file mode 100644 index 9fdeb8a66da..00000000000 --- a/examples/Python/Basic/mesh_connected_components.py +++ /dev/null @@ -1,67 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh_connected_components.py - -import open3d as o3d -import numpy as np -import copy -import time -import os -import sys - -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, "../Misc")) -import meshes - -if __name__ == "__main__": - o3d.utility.set_verbosity_level(o3d.utility.Debug) - - print("Generate data") - mesh = meshes.bunny() - - print("Subdivide mesh to make it a bit harder") - mesh = mesh.subdivide_midpoint(number_of_iterations=2) - print(mesh) - - vert = np.asarray(mesh.vertices) - min_vert, max_vert = vert.min(axis=0), vert.max(axis=0) - for _ in range(30): - cube = o3d.geometry.TriangleMesh.create_box() - cube.scale(0.005) - cube.translate( - ( - np.random.uniform(min_vert[0], max_vert[0]), - np.random.uniform(min_vert[1], max_vert[1]), - np.random.uniform(min_vert[2], max_vert[2]), - ), - relative=False, - ) - mesh += cube - mesh.compute_vertex_normals() - - print("Cluster connected triangles") - tic = time.time() - triangle_clusters, cluster_n_triangles, cluster_area = ( - mesh.cluster_connected_triangles()) - print(" took {}[s]".format(time.time() - tic)) - triangle_clusters = np.asarray(triangle_clusters) - cluster_n_triangles = np.asarray(cluster_n_triangles) - cluster_area = np.asarray(cluster_area) - - print("Show input mesh") - 
o3d.visualization.draw_geometries([mesh]) - - print("Show mesh with small clusters removed") - mesh_0 = copy.deepcopy(mesh) - triangles_to_remove = cluster_n_triangles[triangle_clusters] < 100 - mesh_0.remove_triangles_by_mask(triangles_to_remove) - o3d.visualization.draw_geometries([mesh_0]) - - print("Show largest cluster") - mesh_1 = copy.deepcopy(mesh) - largest_cluster_idx = cluster_n_triangles.argmax() - triangles_to_remove = triangle_clusters != largest_cluster_idx - mesh_1.remove_triangles_by_mask(triangles_to_remove) - o3d.visualization.draw_geometries([mesh_1]) diff --git a/examples/Python/Basic/mesh_filter.py b/examples/Python/Basic/mesh_filter.py deleted file mode 100644 index b5a93b6c093..00000000000 --- a/examples/Python/Basic/mesh_filter.py +++ /dev/null @@ -1,46 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh_filter.py - -import numpy as np -import open3d as o3d - -import os -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def test_mesh(noise=0): - mesh = meshes.knot() - if noise > 0: - vertices = np.asarray(mesh.vertices) - vertices += np.random.uniform(0, noise, size=vertices.shape) - mesh.vertices = o3d.utility.Vector3dVector(vertices) - mesh.compute_vertex_normals() - return mesh - - -if __name__ == '__main__': - in_mesh = test_mesh() - o3d.visualization.draw_geometries([in_mesh]) - - mesh = in_mesh.filter_sharpen(number_of_iterations=1, strength=1) - o3d.visualization.draw_geometries([mesh]) - - in_mesh = test_mesh(noise=5) - o3d.visualization.draw_geometries([in_mesh]) - - mesh = in_mesh.filter_smooth_simple(number_of_iterations=1) - o3d.visualization.draw_geometries([mesh]) - - o3d.visualization.draw_geometries([mesh]) - mesh = in_mesh.filter_smooth_laplacian(number_of_iterations=100) - o3d.visualization.draw_geometries([mesh]) - - 
o3d.visualization.draw_geometries([mesh]) - mesh = in_mesh.filter_smooth_taubin(number_of_iterations=100) - o3d.visualization.draw_geometries([mesh]) diff --git a/examples/Python/Basic/mesh_io.py b/examples/Python/Basic/mesh_io.py deleted file mode 100644 index 0fb8ffc314f..00000000000 --- a/examples/Python/Basic/mesh_io.py +++ /dev/null @@ -1,66 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh_io.py - -import numpy as np -import open3d as o3d -import os - -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - -if __name__ == "__main__": - mesh = meshes.bunny() - mesh.remove_unreferenced_vertices() - triangles = np.asarray(mesh.triangles) - vertices = np.asarray(mesh.vertices) - vertex_normals = np.asarray(mesh.vertex_normals) - n_vertices = vertices.shape[0] - vertex_colors = np.random.uniform(0, 1, size=(n_vertices, 3)) - mesh.vertex_colors = o3d.utility.Vector3dVector(vertex_colors) - - def test_float_array(array_a, array_b, eps=1e-6): - diff = array_a - array_b - dist = np.linalg.norm(diff, axis=1) - return np.all(dist < eps) - - def test_int_array(array_a, array_b): - diff = array_a - array_b - return np.all(diff == 0) - - def compare_mesh(mesh): - success = True - if not test_float_array(vertices, np.asarray(mesh.vertices)): - success = False - print('[WARNING] vertices are not the same') - if not test_float_array(vertex_normals, np.asarray( - mesh.vertex_normals)): - success = False - print('[WARNING] vertex_normals are not the same') - if not test_float_array( - vertex_colors, np.asarray(mesh.vertex_colors), eps=1e-2): - success = False - print('[WARNING] vertex_colors are not the same') - if not test_int_array(triangles, np.asarray(mesh.triangles)): - success = False - print('[WARNING] triangles are not the same') - if success: - print('[INFO] written and read mesh are equal') - 
- print('Write ply file') - o3d.io.write_triangle_mesh('tmp.ply', mesh) - print('Read ply file') - mesh_test = o3d.io.read_triangle_mesh('tmp.ply') - compare_mesh(mesh_test) - os.remove('tmp.ply') - - print('Write obj file') - o3d.io.write_triangle_mesh('tmp.obj', mesh) - print('Read obj file') - mesh_test = o3d.io.read_triangle_mesh('tmp.obj') - compare_mesh(mesh_test) - os.remove('tmp.obj') diff --git a/examples/Python/Basic/mesh_properties.py b/examples/Python/Basic/mesh_properties.py deleted file mode 100644 index 624217047e3..00000000000 --- a/examples/Python/Basic/mesh_properties.py +++ /dev/null @@ -1,170 +0,0 @@ -# Open3selfopen3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh_properties.py - -import numpy as np -import open3d as o3d -import time - -import os -import sys - -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, "../Misc")) -import meshes - - -def mesh_generator(edge_cases=True): - yield "box", o3d.geometry.TriangleMesh.create_box() - yield "sphere", o3d.geometry.TriangleMesh.create_sphere() - yield "cone", o3d.geometry.TriangleMesh.create_cone() - yield "torus", o3d.geometry.TriangleMesh.create_torus(radial_resolution=30, - tubular_resolution=20) - yield "moebius (twists=1)", o3d.geometry.TriangleMesh.create_moebius( - twists=1) - yield "moebius (twists=2)", o3d.geometry.TriangleMesh.create_moebius( - twists=2) - yield "moebius (twists=3)", o3d.geometry.TriangleMesh.create_moebius( - twists=3) - - yield "knot", meshes.knot() - - if edge_cases: - yield "non-manifold edge", meshes.non_manifold_edge() - yield "non-manifold vertex", meshes.non_manifold_vertex() - yield "open box", meshes.open_box() - yield "boxes", meshes.intersecting_boxes() - - -def check_properties(name, mesh): - - def fmt_bool(b): - return "yes" if b else "no" - - print(name) - edge_manifold = mesh.is_edge_manifold(allow_boundary_edges=True) - print(" edge_manifold: 
%s" % fmt_bool(edge_manifold)) - edge_manifold_boundary = mesh.is_edge_manifold(allow_boundary_edges=False) - print(" edge_manifold_boundary: %s" % fmt_bool(edge_manifold_boundary)) - vertex_manifold = mesh.is_vertex_manifold() - print(" vertex_manifold: %s" % fmt_bool(vertex_manifold)) - self_intersecting = mesh.is_self_intersecting() - print(" self_intersecting: %s" % fmt_bool(self_intersecting)) - watertight = mesh.is_watertight() - print(" watertight: %s" % fmt_bool(watertight)) - orientable = mesh.is_orientable() - print(" orientable: %s" % fmt_bool(orientable)) - - mesh.compute_vertex_normals() - - if not edge_manifold: - edges = mesh.get_non_manifold_edges(allow_boundary_edges=True) - print(" # visualize non-manifold edges (allow_boundary_edges=True)") - o3d.visualization.draw_geometries( - [mesh, meshes.edges_to_lineset(mesh, edges, (1, 0, 0))]) - if not edge_manifold_boundary: - edges = mesh.get_non_manifold_edges(allow_boundary_edges=False) - print(" # visualize non-manifold edges (allow_boundary_edges=False)") - o3d.visualization.draw_geometries( - [mesh, meshes.edges_to_lineset(mesh, edges, (0, 1, 0))]) - if not vertex_manifold: - verts = np.asarray(mesh.get_non_manifold_vertices()) - print(" # visualize non-manifold vertices") - pcl = o3d.geometry.PointCloud( - points=o3d.utility.Vector3dVector(np.asarray(mesh.vertices)[verts])) - pcl.paint_uniform_color((0, 0, 1)) - o3d.visualization.draw_geometries([mesh, pcl]) - if self_intersecting: - intersecting_triangles = np.asarray( - mesh.get_self_intersecting_triangles()) - intersecting_triangles = intersecting_triangles[0:1] - intersecting_triangles = np.unique(intersecting_triangles) - print(" # visualize self-intersecting triangles") - triangles = np.asarray(mesh.triangles)[intersecting_triangles] - edges = [ - np.vstack((triangles[:, i], triangles[:, j])) - for i, j in [(0, 1), (1, 2), (2, 0)] - ] - edges = np.hstack(edges).T - edges = o3d.utility.Vector2iVector(edges) - 
o3d.visualization.draw_geometries( - [mesh, meshes.edges_to_lineset(mesh, edges, (1, 1, 0))]) - if watertight: - print(" # visualize watertight mesh") - o3d.visualization.draw_geometries([mesh]) - - if not edge_manifold: - print(" # Remove non-manifold edges") - mesh.remove_non_manifold_edges() - print(" # Is mesh now edge-manifold: {}".format( - fmt_bool(mesh.is_edge_manifold()))) - o3d.visualization.draw_geometries([mesh]) - - -if __name__ == "__main__": - # test mesh properties - print("#" * 80) - print("Test mesh properties") - print("#" * 80) - for name, mesh in mesh_generator(edge_cases=True): - check_properties(name, mesh) - - # fix triangle orientation - print("#" * 80) - print("Fix triangle orientation") - print("#" * 80) - for name, mesh in mesh_generator(edge_cases=False): - mesh.compute_vertex_normals() - triangles = np.asarray(mesh.triangles) - rnd_idx = np.random.rand(*triangles.shape).argsort(axis=1) - rnd_idx[0] = (0, 1, 2) - triangles = np.take_along_axis(triangles, rnd_idx, axis=1) - mesh.triangles = o3d.utility.Vector3iVector(triangles) - o3d.visualization.draw_geometries([mesh]) - sucess = mesh.orient_triangles() - print("%s orientated: %s" % (name, "yes" if sucess else "no")) - o3d.visualization.draw_geometries([mesh]) - - # intersection tests - print("#" * 80) - print("Intersection tests") - print("#" * 80) - np.random.seed(30) - bbox = o3d.geometry.TriangleMesh.create_box(20, 20, 20).translate( - (-10, -10, -10)) - meshes = [o3d.geometry.TriangleMesh.create_box() for _ in range(20)] - meshes.append(o3d.geometry.TriangleMesh.create_sphere()) - meshes.append(o3d.geometry.TriangleMesh.create_cone()) - meshes.append(o3d.geometry.TriangleMesh.create_torus()) - dirs = [np.random.uniform(-0.1, 0.1, size=(3,)) for _ in meshes] - for mesh in meshes: - mesh.compute_vertex_normals() - mesh.paint_uniform_color((0.5, 0.5, 0.5)) - mesh.translate(np.random.uniform(-7.5, 7.5, size=(3,))) - vis = o3d.visualization.Visualizer() - vis.create_window() - for mesh 
in meshes: - vis.add_geometry(mesh) - for iter in range(1000): - for mesh, dir in zip(meshes, dirs): - mesh.paint_uniform_color((0.5, 0.5, 0.5)) - mesh.translate(dir) - for idx0, mesh0 in enumerate(meshes): - collision = False - collision = collision or mesh0.is_intersecting(bbox) - for idx1, mesh1 in enumerate(meshes): - if collision: - break - if idx0 == idx1: - continue - collision = collision or mesh0.is_intersecting(mesh1) - if collision: - mesh0.paint_uniform_color((1, 0, 0)) - dirs[idx0] *= -1 - vis.update_geometry(mesh0) - vis.poll_events() - vis.update_renderer() - time.sleep(0.05) - vis.destroy_window() diff --git a/examples/Python/Basic/mesh_sampling.py b/examples/Python/Basic/mesh_sampling.py deleted file mode 100644 index 8e0fa36787c..00000000000 --- a/examples/Python/Basic/mesh_sampling.py +++ /dev/null @@ -1,76 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh_sampling.py - -import numpy as np -import time -import open3d as o3d - -import os -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def time_fcn(fcn, *fcn_args, runs=5): - times = [] - for _ in range(runs): - tic = time.time() - res = fcn(*fcn_args) - times.append(time.time() - tic) - return res, times - - -def mesh_generator(): - yield meshes.plane() - yield o3d.geometry.TriangleMesh.create_sphere() - yield meshes.bunny() - yield meshes.armadillo() - - -if __name__ == "__main__": - plane = meshes.plane() - o3d.visualization.draw_geometries([plane]) - - print('Uniform sampling can yield clusters of points on the surface') - pcd = plane.sample_points_uniformly(number_of_points=500) - o3d.visualization.draw_geometries([pcd]) - - print( - 'Poisson disk sampling can evenly distributes the points on the surface.' 
- ) - print('The method implements sample elimination.') - print('Therefore, the method starts with a sampled point cloud and removes ' - 'point to satisfy the sampling criterion.') - print('The method supports two options to provide the initial point cloud') - print('1) Default via the parameter init_factor: The method first samples ' - 'uniformly a point cloud from the mesh with ' - 'init_factor x number_of_points and uses this for the elimination') - pcd = plane.sample_points_poisson_disk(number_of_points=500, init_factor=5) - o3d.visualization.draw_geometries([pcd]) - - print( - '2) one can provide an own point cloud and pass it to the ' - 'o3d.geometry.sample_points_poisson_disk method. Then this point cloud is used ' - 'for elimination.') - print('Initial point cloud') - pcd = plane.sample_points_uniformly(number_of_points=2500) - o3d.visualization.draw_geometries([pcd]) - pcd = plane.sample_points_poisson_disk(number_of_points=500, pcl=pcd) - o3d.visualization.draw_geometries([pcd]) - - print('Timings') - for mesh in mesh_generator(): - mesh.compute_vertex_normals() - o3d.visualization.draw_geometries([mesh]) - - pcd, times = time_fcn(mesh.sample_points_uniformly, 500) - print('sample uniform took on average: %f[s]' % np.mean(times)) - o3d.visualization.draw_geometries([pcd]) - - pcd, times = time_fcn(mesh.sample_points_poisson_disk, 500, 5) - print('sample poisson disk took on average: %f[s]' % np.mean(times)) - o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/mesh_simplification.py b/examples/Python/Basic/mesh_simplification.py deleted file mode 100644 index 5a7b1c44400..00000000000 --- a/examples/Python/Basic/mesh_simplification.py +++ /dev/null @@ -1,78 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh_simplification.py - -import numpy as np -import open3d as o3d - -import os -import sys -dir_path = 
os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def mesh_generator(): - mesh = meshes.plane() - yield mesh.subdivide_midpoint(2) - - mesh = o3d.geometry.TriangleMesh.create_box() - yield mesh.subdivide_midpoint(2) - - mesh = o3d.geometry.TriangleMesh.create_sphere() - yield mesh.subdivide_midpoint(2) - - mesh = o3d.geometry.TriangleMesh.create_cone() - yield mesh.subdivide_midpoint(2) - - mesh = o3d.geometry.TriangleMesh.create_cylinder() - yield mesh.subdivide_midpoint(2) - - yield meshes.bathtub() - - yield meshes.bunny() - - -if __name__ == "__main__": - np.random.seed(42) - - for mesh in mesh_generator(): - mesh.compute_vertex_normals() - n_verts = np.asarray(mesh.vertices).shape[0] - mesh.vertex_colors = o3d.utility.Vector3dVector( - np.random.uniform(0, 1, size=(n_verts, 3))) - - print("original mesh has %d triangles and %d vertices" % (np.asarray( - mesh.triangles).shape[0], np.asarray(mesh.vertices).shape[0])) - o3d.visualization.draw_geometries([mesh]) - - voxel_size = max(mesh.get_max_bound() - mesh.get_min_bound()) / 4 - target_number_of_triangles = np.asarray(mesh.triangles).shape[0] // 2 - print('voxel_size = %f' % voxel_size) - - mesh_smp = mesh.simplify_vertex_clustering( - voxel_size=voxel_size, - contraction=o3d.geometry.SimplificationContraction.Average) - print( - "vertex clustered mesh (average) has %d triangles and %d vertices" % - (np.asarray(mesh_smp.triangles).shape[0], - np.asarray(mesh_smp.vertices).shape[0])) - o3d.visualization.draw_geometries([mesh_smp]) - - mesh_smp = mesh.simplify_vertex_clustering( - voxel_size=voxel_size, - contraction=o3d.geometry.SimplificationContraction.Quadric) - print( - "vertex clustered mesh (quadric) has %d triangles and %d vertices" % - (np.asarray(mesh_smp.triangles).shape[0], - np.asarray(mesh_smp.vertices).shape[0])) - o3d.visualization.draw_geometries([mesh_smp]) - - mesh_smp = mesh.simplify_quadric_decimation( - 
target_number_of_triangles=target_number_of_triangles) - print("quadric decimated mesh has %d triangles and %d vertices" % - (np.asarray(mesh_smp.triangles).shape[0], - np.asarray(mesh_smp.vertices).shape[0])) - o3d.visualization.draw_geometries([mesh_smp]) diff --git a/examples/Python/Basic/mesh_subdivision.py b/examples/Python/Basic/mesh_subdivision.py deleted file mode 100644 index 5c86df1fea5..00000000000 --- a/examples/Python/Basic/mesh_subdivision.py +++ /dev/null @@ -1,57 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/mesh_subdivision.py - -import numpy as np -import open3d as o3d - -import os -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - - -def mesh_generator(): - yield meshes.triangle() - yield meshes.plane() - yield o3d.geometry.TriangleMesh.create_tetrahedron() - yield o3d.geometry.TriangleMesh.create_box() - yield o3d.geometry.TriangleMesh.create_octahedron() - yield o3d.geometry.TriangleMesh.create_icosahedron() - yield o3d.geometry.TriangleMesh.create_sphere() - yield o3d.geometry.TriangleMesh.create_cone() - yield o3d.geometry.TriangleMesh.create_cylinder() - yield meshes.knot() - yield meshes.bathtub() - - -if __name__ == "__main__": - np.random.seed(42) - - number_of_iterations = 3 - - for mesh in mesh_generator(): - mesh.compute_vertex_normals() - n_verts = np.asarray(mesh.vertices).shape[0] - colors = np.random.uniform(0, 1, size=(n_verts, 3)) - mesh.vertex_colors = o3d.utility.Vector3dVector(colors) - - print("original mesh has %d triangles and %d vertices" % (np.asarray( - mesh.triangles).shape[0], np.asarray(mesh.vertices).shape[0])) - o3d.visualization.draw_geometries([mesh]) - - mesh_up = mesh.subdivide_midpoint( - number_of_iterations=number_of_iterations) - print("midpoint upsampled mesh has %d triangles and %d vertices" % - 
(np.asarray(mesh_up.triangles).shape[0], - np.asarray(mesh_up.vertices).shape[0])) - o3d.visualization.draw_geometries([mesh_up]) - - mesh_up = mesh.subdivide_loop(number_of_iterations=number_of_iterations) - print("loop upsampled mesh has %d triangles and %d vertices" % - (np.asarray(mesh_up.triangles).shape[0], - np.asarray(mesh_up.vertices).shape[0])) - o3d.visualization.draw_geometries([mesh_up]) diff --git a/examples/Python/Basic/open3d_tutorial.py b/examples/Python/Basic/open3d_tutorial.py deleted file mode 100644 index 08ab21d241e..00000000000 --- a/examples/Python/Basic/open3d_tutorial.py +++ /dev/null @@ -1,32 +0,0 @@ -import open3d as o3d -import numpy as np -import PIL.Image -import IPython.display - - -def jupyter_draw_geometries( - geoms, - window_name="Open3D", - width=1920, - height=1080, - left=50, - top=50, - point_show_normal=False, -): - vis = o3d.visualization.Visualizer() - vis.create_window(window_name=window_name, - width=width, - height=height, - left=left, - top=top) - vis.get_render_option().point_show_normal = point_show_normal - for geom in geoms: - vis.add_geometry(geom) - vis.run() - im = vis.capture_screen_float_buffer() - vis.destroy_window() - im = (255 * np.asarray(im)).astype(np.uint8) - IPython.display.display(PIL.Image.fromarray(im, "RGB")) - - -o3d.visualization.draw_geometries = jupyter_draw_geometries diff --git a/examples/Python/Basic/pointcloud.ipynb b/examples/Python/Basic/pointcloud.ipynb index 8a50c8c4086..87401951ce7 100644 --- a/examples/Python/Basic/pointcloud.ipynb +++ b/examples/Python/Basic/pointcloud.ipynb @@ -3,13 +3,22 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "nbsphinx": "hidden" + }, "outputs": [], "source": [ "import open3d as o3d\n", "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import copy\n", + "import sys\n", + "\n", "# only needed for tutorial, monkey patches visualization\n", - "import open3d_tutorial " + "sys.path.append('..')\n", + 
"import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" ] }, { @@ -33,7 +42,10 @@ "pcd = o3d.io.read_point_cloud(\"../../TestData/fragment.ply\")\n", "print(pcd)\n", "print(np.asarray(pcd.points))\n", - "o3d.visualization.draw_geometries([pcd])" + "o3d.visualization.draw_geometries([pcd], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" ] }, { @@ -46,9 +58,21 @@ "\n", "It looks like a dense surface, but it is actually a point cloud rendered as surfels. The GUI supports various keyboard functions. One of them, the - key reduces the size of the points (surfels).\n", "\n", - "**Note:** Press `h` key to print out a complete list of keyboard instructions for the GUI. For more information of the visualization GUI, refer to Visualization and Customized visualization. TODO links\n", + "
\n", + " \n", + "**Note:** \n", + "\n", + "Press `h` key to print out a complete list of keyboard instructions for the GUI. For more information of the visualization GUI, refer to [Visualization](visualization.ipynb) and [Customized visualization](../Advanced/customized_visualization.rst).\n", + "\n", + "
\n", + "\n", + "
\n", + " \n", + "**Note:** \n", "\n", - "**Note:** On OS X, the GUI window may not receive keyboard event. In this case, try to launch Python with `pythonw` instead of `python`." + "On OS X, the GUI window may not receive keyboard event. In this case, try to launch Python with `pythonw` instead of `python`.\n", + "\n", + "
" ] }, { @@ -70,14 +94,17 @@ "source": [ "print(\"Downsample the point cloud with a voxel of 0.05\")\n", "downpcd = pcd.voxel_down_sample(voxel_size=0.05)\n", - "o3d.visualization.draw_geometries([downpcd])" + "o3d.visualization.draw_geometries([downpcd], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Vertex normal estimation\n", + "## Vertex normal estimation\n", "Another basic operation for point cloud is point normal estimation.\n", "Press n to see point normal. Key - and key + can be used to control the length of the normal." ] @@ -90,7 +117,11 @@ "source": [ "print(\"Recompute the normal of the downsampled point cloud\")\n", "downpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\n", - "o3d.visualization.draw_geometries([downpcd], point_show_normal=True)" + "o3d.visualization.draw_geometries([downpcd], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024], \n", + " point_show_normal=True)" ] }, { @@ -101,7 +132,13 @@ "\n", "The function takes an instance of `KDTreeSearchParamHybrid` class as an argument. The two key arguments `radius = 0.1` and `max_nn = 30` specifies search radius and maximum nearest neighbor. It has 10cm of search radius, and only considers up to 30 neighbors to save computation time.\n", "\n", - "**Note:** The covariance analysis algorithm produces two opposite directions as normal candidates. Without knowing the global structure of the geometry, both can be correct. This is known as the normal orientation problem. Open3D tries to orient the normal to align with the original normal if it exists. Otherwise, Open3D does a random guess. 
Further orientation functions such as `orient_normals_to_align_with_direction` and `orient_normals_towards_camera_location` need to be called if the orientation is a concern." + "
\n", + " \n", + "**Note:** \n", + "\n", + "The covariance analysis algorithm produces two opposite directions as normal candidates. Without knowing the global structure of the geometry, both can be correct. This is known as the normal orientation problem. Open3D tries to orient the normal to align with the original normal if it exists. Otherwise, Open3D does a random guess. Further orientation functions such as `orient_normals_to_align_with_direction` and `orient_normals_towards_camera_location` need to be called if the orientation is a concern.\n", + "\n", + "
" ] }, { @@ -143,7 +180,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Check Working with NumPy for more examples regarding numpy array. TODO link" + "Check [Working with NumPy](working_with_numpy.ipynb) for more examples regarding numpy array." ] }, { @@ -162,7 +199,10 @@ "print(\"Load a polygon volume and use it to crop the original point cloud\")\n", "vol = o3d.visualization.read_selection_polygon_volume(\"../../TestData/Crop/cropped.json\")\n", "chair = vol.crop_point_cloud(pcd)\n", - "o3d.visualization.draw_geometries([chair])" + "o3d.visualization.draw_geometries([chair], zoom=0.7, \n", + " front=[0.5439, -0.2333, -0.8060],\n", + " lookat=[2.4615, 2.1331, 1.338],\n", + " up=[-0.1781, -0.9708, 0.1608])" ] }, { @@ -187,7 +227,10 @@ "source": [ "print(\"Paint chair\")\n", "chair.paint_uniform_color([1, 0.706, 0])\n", - "o3d.visualization.draw_geometries([chair])" + "o3d.visualization.draw_geometries([chair], zoom=0.7, \n", + " front=[0.5439, -0.2333, -0.8060],\n", + " lookat=[2.4615, 2.1331, 1.338],\n", + " up=[-0.1781, -0.9708, 0.1608])" ] }, { @@ -196,9 +239,168 @@ "source": [ "`paint_uniform_color` paints all the points to a uniform color. The color is in RGB space, [0, 1] range." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Bounding Volumes\n", + "The `PointCloud` geometry type has bounding volumes as all other geometry types in Open3D. Currently, Open3D implements an `AxisAlignedBoundingBox` and an `OrientedBoundingBox` that can also be used to crop the geometry." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "aabb = chair.get_axis_aligned_bounding_box()\n", + "aabb.color = (1,0,0)\n", + "obb = chair.get_oriented_bounding_box()\n", + "obb.color = (0,1,0)\n", + "o3d.visualization.draw_geometries([chair, aabb, obb], zoom=0.7, \n", + " front=[0.5439, -0.2333, -0.8060],\n", + " lookat=[2.4615, 2.1331, 1.338],\n", + " up=[-0.1781, -0.9708, 0.1608])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convex hull\n", + "The convex hull of a point cloud is the smallest convex set that contains all points. Open3D contains the method `compute_convex_hull` that computes the convex hull for example of a point cloud. The implementation is based on [Qhull](http://www.qhull.org/)\n", + "\n", + "In the example code below we first sample a point cloud from a mesh and compute the convex hull that is returned as triangle mesh. Then, we visualize the convex hull as a red `LineSet`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcl = o3dtut.get_bunny_mesh().sample_points_poisson_disk(number_of_points=2000)\n", + "hull, _ = pcl.compute_convex_hull()\n", + "hull_ls = o3d.geometry.LineSet.create_from_triangle_mesh(hull)\n", + "hull_ls.paint_uniform_color((1, 0, 0))\n", + "o3d.visualization.draw_geometries([pcl, hull_ls])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## DBSCAN clustering\n", + "Given a point clound for example from a depth sensor we want to group local point clouds together. For this purpose, we can use clustering algorithms. Open3D implements DBSCAN [\\[Ester1996\\]](../reference.html#Ester1996) that is a density based clustering algorithm. The algorithm is implemented in `cluster_dbscan` and requires two parameters. 
`eps` defines the distance to neighbours in a cluster and `min_points` defines the minimun number of points required to form a cluster. The function returns `labels`, where the label `-1` indicates noise." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.io.read_point_cloud(\"../../TestData/fragment.ply\")\n", + "\n", + "with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", + " labels = np.array(pcd.cluster_dbscan(eps=0.02, min_points=10, print_progress=True))\n", + "\n", + "max_label = labels.max()\n", + "print(f\"point cloud has {max_label + 1} clusters\")\n", + "colors = plt.get_cmap(\"tab20\")(labels / (max_label if max_label > 0 else 1))\n", + "colors[labels < 0] = 0\n", + "pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.455, \n", + " front=[-0.4999, -0.1659, -0.8499],\n", + " lookat=[2.1813, 2.0619, 2.0999],\n", + " up=[0.1204, -0.9852, 0.1215])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:** \n", + "\n", + "This algorithm precomputes all neighbours in the epsilon radius for all points. This can require a lot of memory if epsilon is choosen too large.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Plane segmentation\n", + "Open3D contains also support to segment geometric primitives from point clouds using RANSAC. To find the plane with the largest support in the point cloud, we can use `segement_plane`. The method has three arguments. `distance_threshold` defines the maximum distance a point can have to an estimated plane to be considered an inlier, `ransac_n` defines the number of points that are randomly sampled to estimate a plane, and `num_iterations` defines how often a random plane is sampled and verified. The function than returns the plane as $(a,b,c,d)$ such that for each point $(x,y,z)$ on the plane we have $ax + by + cz + d = 0$. The function further retuns a list of indices of the inlier points." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.io.read_point_cloud(\"../../TestData/fragment.pcd\")\n", + "plane_model, inliers = pcd.segment_plane(distance_threshold=0.01,\n", + " ransac_n=3,\n", + " num_iterations=1000)\n", + "[a, b, c, d] = plane_model\n", + "print(f\"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0\")\n", + "\n", + "inlier_cloud = pcd.select_by_index(inliers)\n", + "inlier_cloud.paint_uniform_color([1.0, 0, 0])\n", + "outlier_cloud = pcd.select_by_index(inliers, invert=True)\n", + "o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud], zoom=0.8, \n", + " front=[-0.4999, -0.1659, -0.8499],\n", + " lookat=[2.1813, 2.0619, 2.0999],\n", + " up=[0.1204, -0.9852, 0.1215])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Hidden point removal\n", + "Imagine you want to render a point cloud from a given view point, but points from the background leak into the foreground because they are not occluded by other points. For this purpose we can apply a hidden point removal algorithm. 
In Open3D the method by [\\[Katz2007\\]](../reference.html#Katz2007) is implemented that approximates the visibility of a point cloud from a given view without surface reconstruction or normal estimation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Convert mesh to a point cloud and estimate dimensions\")\n", + "pcd = o3dtut.get_armadillo_mesh().sample_points_poisson_disk(5000)\n", + "diameter = np.linalg.norm(np.asarray(pcd.get_max_bound()) - np.asarray(pcd.get_min_bound()))\n", + "o3d.visualization.draw_geometries([pcd])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Define parameters used for hidden_point_removal\")\n", + "camera = [0, 0, diameter]\n", + "radius = diameter * 100\n", + "\n", + "print(\"Get all points that are visible from given view point\")\n", + "_, pt_map = pcd.hidden_point_removal(camera, radius)\n", + "\n", + "print(\"Visualize result\")\n", + "pcd = pcd.select_by_index(pt_map)\n", + "o3d.visualization.draw_geometries([pcd])" + ] } ], "metadata": { + "celltoolbar": "Edit Metadata", "kernelspec": { "display_name": "Python 3", "language": "python", diff --git a/examples/Python/Basic/pointcloud.py b/examples/Python/Basic/pointcloud.py deleted file mode 100644 index 95fa88efa53..00000000000 --- a/examples/Python/Basic/pointcloud.py +++ /dev/null @@ -1,43 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/pointcloud.py - -import numpy as np -import open3d as o3d - -if __name__ == "__main__": - - print("Load a ply point cloud, print it, and render it") - pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") - print(pcd) - print(np.asarray(pcd.points)) - o3d.visualization.draw_geometries([pcd]) - - print("Downsample the point cloud with a voxel of 0.05") - downpcd = 
pcd.voxel_down_sample(voxel_size=0.05) - o3d.visualization.draw_geometries([downpcd]) - - print("Recompute the normal of the downsampled point cloud") - downpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid( - radius=0.1, max_nn=30)) - o3d.visualization.draw_geometries([downpcd]) - - print("Print a normal vector of the 0th point") - print(downpcd.normals[0]) - print("Print the normal vectors of the first 10 points") - print(np.asarray(downpcd.normals)[:10, :]) - print("") - - print("Load a polygon volume and use it to crop the original point cloud") - vol = o3d.visualization.read_selection_polygon_volume( - "../../TestData/Crop/cropped.json") - chair = vol.crop_point_cloud(pcd) - o3d.visualization.draw_geometries([chair]) - print("") - - print("Paint chair") - chair.paint_uniform_color([1, 0.706, 0]) - o3d.visualization.draw_geometries([chair]) - print("") diff --git a/examples/Python/Basic/pointcloud_estimate_normals.py b/examples/Python/Basic/pointcloud_estimate_normals.py deleted file mode 100644 index a268d9165bb..00000000000 --- a/examples/Python/Basic/pointcloud_estimate_normals.py +++ /dev/null @@ -1,87 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/pointcloud_estimate_normals.py - -import numpy as np -import open3d as o3d -import time - -import os -import sys -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(dir_path, '../Misc')) -import meshes - -np.random.seed(42) - - -def knn_generator(): - yield 'knn20', o3d.geometry.KDTreeSearchParamKNN(20) - yield 'radius', o3d.geometry.KDTreeSearchParamRadius(0.01666) - yield 'hybrid', o3d.geometry.KDTreeSearchParamHybrid(0.01666, 60) - - -def pointcloud_generator(): - pts = np.random.uniform(-30, 30, size=(int(1e5), 3)) - pcl = o3d.geometry.PointCloud() - pcl.points = o3d.utility.Vector3dVector(pts) - yield 'uniform', pcl - - yield 'moebius', 
o3d.geometry.TriangleMesh.create_moebius( - ).sample_points_uniformly(int(1e5)) - - yield 'bunny', meshes.bunny().scale(10).sample_points_uniformly(int(1e5)) - - yield 'fragment', o3d.io.read_point_cloud('../../TestData/fragment.ply') - - -if __name__ == "__main__": - # Benchmark - for pcl_name, pcl in pointcloud_generator(): - for knn_name, knn in knn_generator(): - print('-' * 80) - for fast_normal_computation in [True, False]: - times = [] - for _ in range(50): - tic = time.time() - pcl.estimate_normals( - knn, fast_normal_computation=fast_normal_computation) - times.append(time.time() - tic) - print('fast={}: {}, {} -- avg time={}[s]'.format( - fast_normal_computation, pcl_name, knn_name, - np.mean(times))) - - # Test - for pcl_name, pcl in pointcloud_generator(): - for knn_name, knn in knn_generator(): - pcl.estimate_normals(knn, True) - normals_fast = np.asarray(pcl.normals) - pcl.estimate_normals(knn, False) - normals_eigen = np.asarray(pcl.normals) - test = (normals_eigen * normals_fast).sum(axis=1) - print('normals agree: {}'.format(np.all(test - 1 < 1e-9))) - - # Test normals of flat surface - X, Y = np.mgrid[0:1:0.1, 0:1:0.1] - X = X.flatten() - Y = Y.flatten() - - pts = np.zeros((3, X.size)) - pts[0] = X - pts[1] = Y - - shape = o3d.geometry.PointCloud() - shape.points = o3d.utility.Vector3dVector(pts.T) - shape.paint_uniform_color([0, 0.651, 0.929]) # blue - - shape.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=0.5, - max_nn=30), - fast_normal_computation=True) - o3d.visualization.draw_geometries([shape]) - - shape.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=0.5, - max_nn=30), - fast_normal_computation=False) - o3d.visualization.draw_geometries([shape]) diff --git a/examples/Python/Basic/pointcloud_plane_segmentation.py b/examples/Python/Basic/pointcloud_plane_segmentation.py deleted file mode 100644 index 9e8d16eedc4..00000000000 --- a/examples/Python/Basic/pointcloud_plane_segmentation.py +++ /dev/null @@ -1,28 +0,0 @@ 
-# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/pointcloud_plane_segmentation.py - -import numpy as np -import open3d as o3d - -if __name__ == "__main__": - pcd = o3d.io.read_point_cloud("../../TestData/fragment.pcd") - - print( - "Find the plane model and the inliers of the largest planar segment in the point cloud." - ) - plane_model, inliers = pcd.segment_plane(distance_threshold=0.01, - ransac_n=3, - num_iterations=250) - - [a, b, c, d] = plane_model - print(f"Plane model: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0") - - inlier_cloud = pcd.select_by_index(inliers) - inlier_cloud.paint_uniform_color([1.0, 0, 0]) - - outlier_cloud = pcd.select_by_index(inliers, invert=True) - - o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud]) diff --git a/examples/Python/Basic/python_binding.py b/examples/Python/Basic/python_binding.py deleted file mode 100644 index 08991d7668c..00000000000 --- a/examples/Python/Basic/python_binding.py +++ /dev/null @@ -1,23 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/python_binding.py - -import open3d as o3d - - -def example_import_function(): - pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - print(pcd) - - -def example_help_function(): - help(o3d) - help(o3d.geometry.PointCloud) - help(o3d.io.read_point_cloud) - - -if __name__ == "__main__": - example_import_function() - example_help_function() diff --git a/examples/Python/Basic/python_interface.ipynb b/examples/Python/Basic/python_interface.ipynb new file mode 100644 index 00000000000..0d1ee644db0 --- /dev/null +++ b/examples/Python/Basic/python_interface.ipynb @@ -0,0 +1,140 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Python Interface\n", + "For the C++ interface see [here](../C++/cplusplus_interface.rst)\n", + "\n", + "## Install 
open3d Python package\n", + "For installing Open3D Python package, see [here](../../getting_started.rst).\n", + "\n", + "## Install open3d from source\n", + "For installing from source, see [here](../../compilation.rst)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Import open3d\n", + "This tutorial shows how to import the `open3d` module and print out help information. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import open3d as o3d" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:** \n", + "\n", + "Depending on environment, the name of Python library may not be `open3d.so`. Regardless of the file name, `import open3d` should work.\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_0.pcd\")\n", + "print(pcd)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This imports the `read_point_cloud` function from the `open3d` module. It reads a point cloud file and returns an instance of the `PointCloud` class. `print(pcd)` prints brief information of the point cloud." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using built-in help function\n", + "\n", + "### Browse open3d\n", + "`help(open3d)` prints documents of `open3d` module." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "help(o3d)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Description of a class in open3d\n", + "`help(open3d.PointCloud)` provides description of the `PointCloud` class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "help(o3d.geometry.PointCloud)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Description of a function in open3d\n", + "`help(open3d.read_point_cloud)` provides a description of the input arguments and return type of the `read_point_cloud function`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "help(o3d.io.read_point_cloud)" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/rgbd_image.ipynb b/examples/Python/Basic/rgbd_image.ipynb new file mode 100644 index 00000000000..26e76a1847c --- /dev/null +++ b/examples/Python/Basic/rgbd_image.ipynb @@ -0,0 +1,326 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RGBD images\n", + "Open3D has a data structure for images. It supports various functions such as `read_image`, `write_image`, `filter_image` and `draw_geometries`. An Open3D Image can be directly converted to/from a numpy array.\n", + "\n", + "An Open3D `RGBDImage` is composed of two images, `RGBDImage.depth` and `RGBDImage.color`. We require the two images to be registered into the same camera frame and have the same resolution. The following tutorials show how to read and use RGBD images from a number of well known RGBD datasets." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Redwood dataset\n", + "In this section we show how to read and visualize an `RGBDImage` from the [Redwood dataset](http://redwood-data.org/) [Choi2015]_.\n", + "\n", + "The Redwood format stored depth in a 16-bit single channel image. The integer value represents the depth measurement in millimeters. It is the default format for Open3D to parse depth images." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Read Redwood dataset\")\n", + "color_raw = o3d.io.read_image(\"../../TestData/RGBD/color/00000.jpg\")\n", + "depth_raw = o3d.io.read_image(\"../../TestData/RGBD/depth/00000.png\")\n", + "rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", + " color_raw, depth_raw)\n", + "print(rgbd_image)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The default conversion function `create_rgbd_image_from_color_and_depth` creates an RGBDImage from a pair of color and depth image. The color image is converted into a grayscale image, stored in `float` ranged in [0, 1]. The depth image is stored in `float`, representing the depth value in meters.\n", + "\n", + "The converted images can be rendered as numpy arrays." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.subplot(1, 2, 1)\n", + "plt.title('Redwood grayscale image')\n", + "plt.imshow(rgbd_image.color)\n", + "plt.subplot(1, 2, 2)\n", + "plt.title('Redwood depth image')\n", + "plt.imshow(rgbd_image.depth)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The RGBD image can be converted into a point cloud, given a set of camera parameters." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " rgbd_image,\n", + " o3d.camera.PinholeCameraIntrinsic(\n", + " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n", + "# Flip it, otherwise the pointcloud will be upside down\n", + "pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we use `PinholeCameraIntrinsicParameters.PrimeSenseDefault` as default camera parameter. It has image resolution 640x480, focal length (fx, fy) = (525.0, 525.0), and optical center (cx, cy) = (319.5, 239.5). An identity matrix is used as the default extrinsic parameter. `pcd.transform` applies an up-down flip transformation on the point cloud for better visualization purpose. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## SUN dataset\n", + "In this section we show how to read and visualize an RGBDImage of the [SUN dataset](http://rgbd.cs.princeton.edu/) [Song2015]_.\n", + "\n", + "This tutorial is almost the same as the tutorial processing Redwood dataset above. The only difference is that we use the conversion function `create_rgbd_image_from_sun_format` to parse depth images in the SUN dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Read SUN dataset\")\n", + "color_raw = o3d.io.read_image(\n", + " \"../../TestData/RGBD/other_formats/SUN_color.jpg\")\n", + "depth_raw = o3d.io.read_image(\n", + " \"../../TestData/RGBD/other_formats/SUN_depth.png\")\n", + "rgbd_image = o3d.geometry.RGBDImage.create_from_sun_format(\n", + " color_raw, depth_raw)\n", + "print(rgbd_image)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.subplot(1, 2, 1)\n", + "plt.title('SUN grayscale image')\n", + "plt.imshow(rgbd_image.color)\n", + "plt.subplot(1, 2, 2)\n", + "plt.title('SUN depth image')\n", + "plt.imshow(rgbd_image.depth)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " rgbd_image,\n", + " o3d.camera.PinholeCameraIntrinsic(\n", + " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n", + "# Flip it, otherwise the pointcloud will be upside down\n", + "pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NYU dataset\n", + "This section shows how to read and visualize an `RGBDImage` from the [NYU dataset](https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html) [Silberman2012]_. \n", + "\n", + "This tutorial is almost the same as the tutorial processing Redwood dataset above, with two differences. First, NYU images are not in standard `jpg` or `png` formats. Thus, we use `mpimg.imread` to read the color image as a numpy array and convert it to an Open3D `Image`. An additional helper function `read_nyu_pgm` is called to read depth images from the special big endian `pgm` format used in the NYU dataset. 
Second, we use a different conversion function `create_rgbd_image_from_nyu_format` to parse depth images in the NYU dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.image as mpimg\n", + "import re\n", + "\n", + "# This is a special function used for reading NYU pgm format\n", + "# as it is written in big endian byte order.\n", + "def read_nyu_pgm(filename, byteorder='>'):\n", + " with open(filename, 'rb') as f:\n", + " buffer = f.read()\n", + " try:\n", + " header, width, height, maxval = re.search(\n", + " b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n", + " b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n", + " b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n", + " b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n", + " except AttributeError:\n", + " raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n", + " img = np.frombuffer(buffer,\n", + " dtype=byteorder + 'u2',\n", + " count=int(width) * int(height),\n", + " offset=len(header)).reshape((int(height), int(width)))\n", + " img_out = img.astype('u2')\n", + " return img_out\n", + "\n", + "\n", + "print(\"Read NYU dataset\")\n", + "# Open3D does not support ppm/pgm file yet. Not using o3d.io.read_image here.\n", + "# matplotlib.image has some issue with NYU pgm file. 
Not using imread for pgm.\n", + "color_raw = mpimg.imread(\"../../TestData/RGBD/other_formats/NYU_color.ppm\")\n", + "depth_raw = read_nyu_pgm(\"../../TestData/RGBD/other_formats/NYU_depth.pgm\")\n", + "color = o3d.geometry.Image(color_raw)\n", + "depth = o3d.geometry.Image(depth_raw)\n", + "rgbd_image = o3d.geometry.RGBDImage.create_from_nyu_format(color, depth)\n", + "print(rgbd_image)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.subplot(1, 2, 1)\n", + "plt.title('NYU grayscale image')\n", + "plt.imshow(rgbd_image.color)\n", + "plt.subplot(1, 2, 2)\n", + "plt.title('NYU depth image')\n", + "plt.imshow(rgbd_image.depth)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " rgbd_image,\n", + " o3d.camera.PinholeCameraIntrinsic(\n", + " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n", + "# Flip it, otherwise the pointcloud will be upside down\n", + "pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TUM dataset\n", + "This section shows how to read and visualize an RGBDImage from the [TUM dataset](https://vision.in.tum.de/data/datasets/rgbd-dataset) [Strum2012]_.\n", + "\n", + "This tutorial is almost the same as the tutorial processing Redwood dataset above. The only difference is that we use conversion function `create_rgbd_image_from_tum_format` to parse depth images in the TUM dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Read TUM dataset\")\n", + "color_raw = o3d.io.read_image(\n", + " \"../../TestData/RGBD/other_formats/TUM_color.png\")\n", + "depth_raw = o3d.io.read_image(\n", + " \"../../TestData/RGBD/other_formats/TUM_depth.png\")\n", + "rgbd_image = o3d.geometry.RGBDImage.create_from_tum_format(\n", + " color_raw, depth_raw)\n", + "print(rgbd_image)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.subplot(1, 2, 1)\n", + "plt.title('TUM grayscale image')\n", + "plt.imshow(rgbd_image.color)\n", + "plt.subplot(1, 2, 2)\n", + "plt.title('TUM depth image')\n", + "plt.imshow(rgbd_image.depth)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " rgbd_image,\n", + " o3d.camera.PinholeCameraIntrinsic(\n", + " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n", + "# Flip it, otherwise the pointcloud will be upside down\n", + "pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.35)" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/rgbd_nyu.py b/examples/Python/Basic/rgbd_nyu.py deleted file mode 100644 index 4c7d9114bac..00000000000 --- a/examples/Python/Basic/rgbd_nyu.py +++ /dev/null @@ -1,58 +0,0 @@ -# Open3D: www.open3d.org 
-# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/rgbd_nyu.py - -import open3d as o3d -import numpy as np -import re -import matplotlib.image as mpimg -import matplotlib.pyplot as plt - - -# This is special function used for reading NYU pgm format -# as it is written in big endian byte order. -def read_nyu_pgm(filename, byteorder='>'): - with open(filename, 'rb') as f: - buffer = f.read() - try: - header, width, height, maxval = re.search( - b"(^P5\s(?:\s*#.*[\r\n])*" - b"(\d+)\s(?:\s*#.*[\r\n])*" - b"(\d+)\s(?:\s*#.*[\r\n])*" - b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups() - except AttributeError: - raise ValueError("Not a raw PGM file: '%s'" % filename) - img = np.frombuffer(buffer, - dtype=byteorder + 'u2', - count=int(width) * int(height), - offset=len(header)).reshape((int(height), int(width))) - img_out = img.astype('u2') - return img_out - - -if __name__ == "__main__": - print("Read NYU dataset") - # Open3D does not support ppm/pgm file yet. Not using o3d.io.read_image here. - # MathplotImage having some ISSUE with NYU pgm file. Not using imread for pgm. 
- color_raw = mpimg.imread("../../TestData/RGBD/other_formats/NYU_color.ppm") - depth_raw = read_nyu_pgm("../../TestData/RGBD/other_formats/NYU_depth.pgm") - color = o3d.geometry.Image(color_raw) - depth = o3d.geometry.Image(depth_raw) - rgbd_image = o3d.geometry.RGBDImage.create_from_nyu_format(color, depth) - print(rgbd_image) - plt.subplot(1, 2, 1) - plt.title('NYU grayscale image') - plt.imshow(rgbd_image.color) - plt.subplot(1, 2, 2) - plt.title('NYU depth image') - plt.imshow(rgbd_image.depth) - plt.show() - pcd = o3d.geometry.PointCloud.create_from_rgbd_image( - rgbd_image, - o3d.camera.PinholeCameraIntrinsic( - o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) - # Flip it, otherwise the pointcloud will be upside down - pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/rgbd_odometry.ipynb b/examples/Python/Basic/rgbd_odometry.ipynb new file mode 100644 index 00000000000..f8203680206 --- /dev/null +++ b/examples/Python/Basic/rgbd_odometry.ipynb @@ -0,0 +1,200 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RGBD Odometry\n", + "An RGBD odometry finds the camera movement between two consecutive RGBD image pairs. The input are two instances of `RGBDImage`. The output is the motion in the form of a rigid body transformation. 
Open3D implements the method of [\\[Steinbrucker2011\\]](../reference.html#steinbrucker2011) and [\\[Park2017\\]](../reference.html#park2017)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Read camera intrinsic\n", + "We first read the camera intrinsic matrix from a json file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pinhole_camera_intrinsic = o3d.io.read_pinhole_camera_intrinsic(\n", + " \"../../TestData/camera_primesense.json\")\n", + "print(pinhole_camera_intrinsic.intrinsic_matrix)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:** \n", + "\n", + "Lots of small data structures in Open3D can be read from / written into `json` files. This includes camera intrinsics, camera trajectory, pose graph, etc.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Read RGBD image\n", + "This code block reads two pairs of RGBD images in the Redwood format. We refer to [Redwood dataset](rgbd_image.ipynb#Redwood-dataset) for a comprehensive explanation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "source_color = o3d.io.read_image(\"../../TestData/RGBD/color/00000.jpg\")\n", + "source_depth = o3d.io.read_image(\"../../TestData/RGBD/depth/00000.png\")\n", + "target_color = o3d.io.read_image(\"../../TestData/RGBD/color/00001.jpg\")\n", + "target_depth = o3d.io.read_image(\"../../TestData/RGBD/depth/00001.png\")\n", + "source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", + " source_color, source_depth)\n", + "target_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", + " target_color, target_depth)\n", + "target_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " target_rgbd_image, pinhole_camera_intrinsic)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "**Note:** \n", + "\n", + "Open3D assumes the color image and depth image are synchronized and registered in the same coordinate frame. This can usually be done by turning on both the synchronization and registration features in the RGBD camera settings.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compute odometry from two RGBD image pairs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "option = o3d.odometry.OdometryOption()\n", + "odo_init = np.identity(4)\n", + "print(option)\n", + "\n", + "[success_color_term, trans_color_term, info] = o3d.odometry.compute_rgbd_odometry(\n", + " source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic,\n", + " odo_init, o3d.odometry.RGBDOdometryJacobianFromColorTerm(), option)\n", + "[success_hybrid_term, trans_hybrid_term, info] = o3d.odometry.compute_rgbd_odometry(\n", + " source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic,\n", + " odo_init, o3d.odometry.RGBDOdometryJacobianFromHybridTerm(), option)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This code block calls two different RGBD odometry methods. The first one is from [\\[Steinbrucker2011\\]](../reference.html#steinbrucker2011). It minimizes photo consistency of aligned images. The second one is from [\\[Park2017\\]](../reference.html#park2017). In addition to photo consistency, it implements constraint for geometry. Both functions run in similar speed. But [\\[Park2017\\]](../reference.html#park2017) is more accurate in our test on benchmark datasets. It is recommended.\n", + "\n", + "Several parameters in `OdometryOption()`:\n", + "\n", + "- `minimum_correspondence_ratio`: After alignment, measure the overlapping ratio of two RGBD images. If overlapping region of two RGBD image is smaller than specified ratio, the odometry module regards that this is a failure case.\n", + "- `max_depth_diff`: In depth image domain, if two aligned pixels have a depth difference less than specified value, they are considered as a correspondence. 
Larger values induce a more aggressive search, but the result is prone to instability.\n", + "- `min_depth` and `max_depth`: Pixels with depth values smaller or larger than the specified values are ignored." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize RGBD image pairs\n", + "The RGBD image pairs are converted into point clouds and rendered together. Note that the point cloud representing the first (source) RGBD image is transformed with the transformation estimated by the odometry. After this transformation, both point clouds are aligned." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if success_color_term:\n", + " print(\"Using RGB-D Odometry\")\n", + " print(trans_color_term)\n", + " source_pcd_color_term = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " source_rgbd_image, pinhole_camera_intrinsic)\n", + " source_pcd_color_term.transform(trans_color_term)\n", + " o3d.visualization.draw_geometries([target_pcd, source_pcd_color_term],\n", + " zoom=0.48,\n", + " front=[0.0999, -0.1787, -0.9788],\n", + " lookat=[0.0345, -0.0937, 1.8033],\n", + " up=[-0.0067, -0.9838, 0.1790])\n", + "if success_hybrid_term:\n", + " print(\"Using Hybrid RGB-D Odometry\")\n", + " print(trans_hybrid_term)\n", + " source_pcd_hybrid_term = o3d.geometry.PointCloud.create_from_rgbd_image(\n", + " source_rgbd_image, pinhole_camera_intrinsic)\n", + " source_pcd_hybrid_term.transform(trans_hybrid_term)\n", + " o3d.visualization.draw_geometries([target_pcd, source_pcd_hybrid_term],\n", + " zoom=0.48,\n", + " front=[0.0999, -0.1787, -0.9788],\n", + " lookat=[0.0345, -0.0937, 1.8033],\n", + " up=[-0.0067, -0.9838, 0.1790])" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", 
"mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/rgbd_odometry.py b/examples/Python/Basic/rgbd_odometry.py deleted file mode 100644 index e014f6e5374..00000000000 --- a/examples/Python/Basic/rgbd_odometry.py +++ /dev/null @@ -1,52 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/rgbd_odometry.py - -import open3d as o3d -import numpy as np - -if __name__ == "__main__": - pinhole_camera_intrinsic = o3d.io.read_pinhole_camera_intrinsic( - "../../TestData/camera_primesense.json") - print(pinhole_camera_intrinsic.intrinsic_matrix) - - source_color = o3d.io.read_image("../../TestData/RGBD/color/00000.jpg") - source_depth = o3d.io.read_image("../../TestData/RGBD/depth/00000.png") - target_color = o3d.io.read_image("../../TestData/RGBD/color/00001.jpg") - target_depth = o3d.io.read_image("../../TestData/RGBD/depth/00001.png") - source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth( - source_color, source_depth) - target_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth( - target_color, target_depth) - target_pcd = o3d.geometry.PointCloud.create_from_rgbd_image( - target_rgbd_image, pinhole_camera_intrinsic) - - option = o3d.odometry.OdometryOption() - odo_init = np.identity(4) - print(option) - - [success_color_term, trans_color_term, - info] = o3d.odometry.compute_rgbd_odometry( - source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic, - odo_init, o3d.odometry.RGBDOdometryJacobianFromColorTerm(), option) - [success_hybrid_term, trans_hybrid_term, - info] = o3d.odometry.compute_rgbd_odometry( - source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic, - odo_init, o3d.odometry.RGBDOdometryJacobianFromHybridTerm(), option) - - if success_color_term: - print("Using RGB-D 
Odometry") - print(trans_color_term) - source_pcd_color_term = o3d.geometry.PointCloud.create_from_rgbd_image( - source_rgbd_image, pinhole_camera_intrinsic) - source_pcd_color_term.transform(trans_color_term) - o3d.visualization.draw_geometries([target_pcd, source_pcd_color_term]) - if success_hybrid_term: - print("Using Hybrid RGB-D Odometry") - print(trans_hybrid_term) - source_pcd_hybrid_term = o3d.geometry.PointCloud.create_from_rgbd_image( - source_rgbd_image, pinhole_camera_intrinsic) - source_pcd_hybrid_term.transform(trans_hybrid_term) - o3d.visualization.draw_geometries([target_pcd, source_pcd_hybrid_term]) diff --git a/examples/Python/Basic/rgbd_redwood.py b/examples/Python/Basic/rgbd_redwood.py deleted file mode 100644 index 60bfa88ac1c..00000000000 --- a/examples/Python/Basic/rgbd_redwood.py +++ /dev/null @@ -1,32 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/rgbd_redwood.py - -import open3d as o3d -import matplotlib.pyplot as plt - -if __name__ == "__main__": - print("Read Redwood dataset") - color_raw = o3d.io.read_image("../../TestData/RGBD/color/00000.jpg") - depth_raw = o3d.io.read_image("../../TestData/RGBD/depth/00000.png") - rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth( - color_raw, depth_raw) - print(rgbd_image) - - plt.subplot(1, 2, 1) - plt.title('Redwood grayscale image') - plt.imshow(rgbd_image.color) - plt.subplot(1, 2, 2) - plt.title('Redwood depth image') - plt.imshow(rgbd_image.depth) - plt.show() - - pcd = o3d.geometry.PointCloud.create_from_rgbd_image( - rgbd_image, - o3d.camera.PinholeCameraIntrinsic( - o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) - # Flip it, otherwise the pointcloud will be upside down - pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/rgbd_sun.py 
b/examples/Python/Basic/rgbd_sun.py deleted file mode 100644 index 180719f84ca..00000000000 --- a/examples/Python/Basic/rgbd_sun.py +++ /dev/null @@ -1,32 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/rgbd_sun.py - -import open3d as o3d -import matplotlib.pyplot as plt - -if __name__ == "__main__": - print("Read SUN dataset") - color_raw = o3d.io.read_image( - "../../TestData/RGBD/other_formats/SUN_color.jpg") - depth_raw = o3d.io.read_image( - "../../TestData/RGBD/other_formats/SUN_depth.png") - rgbd_image = o3d.geometry.RGBDImage.create_from_sun_format( - color_raw, depth_raw) - print(rgbd_image) - plt.subplot(1, 2, 1) - plt.title('SUN grayscale image') - plt.imshow(rgbd_image.color) - plt.subplot(1, 2, 2) - plt.title('SUN depth image') - plt.imshow(rgbd_image.depth) - plt.show() - pcd = o3d.geometry.PointCloud.create_from_rgbd_image( - rgbd_image, - o3d.camera.PinholeCameraIntrinsic( - o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) - # Flip it, otherwise the pointcloud will be upside down - pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/rgbd_tum.py b/examples/Python/Basic/rgbd_tum.py deleted file mode 100644 index ab0ebb30dbb..00000000000 --- a/examples/Python/Basic/rgbd_tum.py +++ /dev/null @@ -1,32 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/rgbd_tum.py - -import open3d as o3d -import matplotlib.pyplot as plt - -if __name__ == "__main__": - print("Read TUM dataset") - color_raw = o3d.io.read_image( - "../../TestData/RGBD/other_formats/TUM_color.png") - depth_raw = o3d.io.read_image( - "../../TestData/RGBD/other_formats/TUM_depth.png") - rgbd_image = o3d.geometry.RGBDImage.create_from_tum_format( - color_raw, depth_raw) - print(rgbd_image) - 
plt.subplot(1, 2, 1) - plt.title('TUM grayscale image') - plt.imshow(rgbd_image.color) - plt.subplot(1, 2, 2) - plt.title('TUM depth image') - plt.imshow(rgbd_image.depth) - plt.show() - pcd = o3d.geometry.PointCloud.create_from_rgbd_image( - rgbd_image, - o3d.camera.PinholeCameraIntrinsic( - o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) - # Flip it, otherwise the pointcloud will be upside down - pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/transformation.ipynb b/examples/Python/Basic/transformation.ipynb new file mode 100644 index 00000000000..40cd765d21c --- /dev/null +++ b/examples/Python/Basic/transformation.ipynb @@ -0,0 +1,206 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import copy\n", + "import sys\n", + "\n", + "# only needed for tutorial, monkey patches visualization\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Transformation\n", + "The geometry types of Open3D have a number of transformation methods. In this tutorial we show how to use `translate`, `rotate`, `scale`, and `transform`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Translate\n", + "The first transformation method we want to look at is `translate`. The translate method takes a single 3D vector $t$ as input and translates all points/vertices of the geometry by this vector, $v_t = v + t$. The code below shows how the mesh is once translated in the x- and once in the y-direction." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()\n", + "mesh_tx = copy.deepcopy(mesh).translate((1.3,0,0))\n", + "mesh_ty = copy.deepcopy(mesh).translate((0,1.3,0))\n", + "print(f'Center of mesh: {mesh.get_center()}')\n", + "print(f'Center of mesh tx: {mesh_tx.get_center()}')\n", + "print(f'Center of mesh ty: {mesh_ty.get_center()}')\n", + "o3d.visualization.draw_geometries([mesh, mesh_tx, mesh_ty])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The method takes a second argument `relative` that is default set to `True`. If we change it to `False`, then the center of the geometry is translated directly to the position specified in the first argument." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(origin=(1,2,3))\n", + "mesh_mv = copy.deepcopy(mesh).translate((2,2,2), relative=False)\n", + "print(f'Center of mesh: {mesh.get_center()}')\n", + "print(f'Center of translated mesh: {mesh_mv.get_center()}')\n", + "o3d.visualization.draw_geometries([mesh, mesh_mv])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Rotation\n", + "The geometry types of Open3D can also be rotated with the method `rotate`. It takes as first argument a rotation matrix `R`. 
As rotations in 3D can be parametrized in a number of ways, Open3D provides convenience functions to convert from different parametrizations to rotation matrices:\n", + "\n", + "- Convert from [Euler angles](https://en.wikipedia.org/wiki/Euler_angles) with `get_rotation_matrix_from_xyz` (where `xyz` can also be of the form `yzx`, `zxy`, `xzy`, `zyx`, and `yxz`)\n", + "- Convert from [Axis-angle representation](https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation) with `get_rotation_matrix_from_axis_angle`\n", + "- Convert from [Quaternions](https://en.wikipedia.org/wiki/Quaternion) with `get_rotation_matrix_from_quaternion`\n", + "\n", + "In the code below we first translate the mesh and then rotate it using Euler angles." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()\n", + "mesh_r = copy.deepcopy(mesh).translate((2,0,0))\n", + "mesh_r.rotate(mesh.get_rotation_matrix_from_xyz((np.pi/2,0,np.pi/4)))\n", + "o3d.visualization.draw_geometries([mesh, mesh_r])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The function `rotate` has a second argument `center` that is default set to `True`. This indicates that the object is first centered prior to applying the rotation and then moved back to its previous center. If this argument is set to `False`, then the rotation will be applied directly, which rotates the whole geometry around the coordinate center. This implies that the mesh center can be changed after the rotation."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()\n", + "mesh_r = copy.deepcopy(mesh).translate((2,0,0))\n", + "mesh_r.rotate(mesh.get_rotation_matrix_from_xyz((np.pi/2,0,np.pi/4)), center=False)\n", + "o3d.visualization.draw_geometries([mesh, mesh_r])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Scale\n", + "Vertices and points of Open3D geometry types can also be scaled using `scale`, $v_s = s \\cdot v$. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()\n", + "mesh_s = copy.deepcopy(mesh).translate((2,0,0))\n", + "mesh_s.scale(0.5)\n", + "o3d.visualization.draw_geometries([mesh, mesh_s])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `scale` method also has a second argument `center` that is set to `True` by default. If it is set to `False`, then the object is not centered prior to scaling and this can then move the center of the object." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()\n", + "mesh_s = copy.deepcopy(mesh).translate((2,1,0))\n", + "mesh_s.scale(0.5, center=False)\n", + "o3d.visualization.draw_geometries([mesh, mesh_s])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## General transformation\n", + "Open3D also supports a general transformation by a general $4x4$ matrix using the method `transform`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()\n", + "T = np.eye(4)\n", + "T[:3,:3] = mesh.get_rotation_matrix_from_xyz((0,np.pi/3, np.pi/2))\n", + "T[0,3] = 1\n", + "T[1,3] = 1.3\n", + "print(T)\n", + "mesh_t = copy.deepcopy(mesh).transform(T)\n", + "o3d.visualization.draw_geometries([mesh, mesh_t])" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/Python/Basic/transformation.py b/examples/Python/Basic/transformation.py deleted file mode 100644 index d6067747022..00000000000 --- a/examples/Python/Basic/transformation.py +++ /dev/null @@ -1,68 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Utility/transformation.py - -import numpy as np -import open3d as o3d -import time - - -def geometry_generator(): - mesh = o3d.geometry.TriangleMesh.create_sphere() - verts = np.asarray(mesh.vertices) - colors = np.random.uniform(0, 1, size=verts.shape) - mesh.vertex_colors = o3d.utility.Vector3dVector(colors) - mesh.compute_vertex_normals() - - pcl = o3d.geometry.PointCloud() - pcl.points = mesh.vertices - pcl.colors = mesh.vertex_colors - pcl.normals = mesh.vertex_normals - yield pcl - - yield o3d.geometry.LineSet.create_from_triangle_mesh(mesh) - - yield mesh - - -def animate(geom): - vis = o3d.visualization.Visualizer() - vis.create_window() - - geom.rotate(geom.get_rotation_matrix_from_xyz((0.75, 0.5, 0))) - vis.add_geometry(geom) - - 
scales = [0.9 for _ in range(30)] + [1 / 0.9 for _ in range(30)] - axisangles = [(0.2 / np.sqrt(2), 0.2 / np.sqrt(2), 0) for _ in range(60)] - ts = [(0.1, 0.1, -0.1) for _ in range(30) - ] + [(-0.1, -0.1, 0.1) for _ in range(30)] - - for scale, aa in zip(scales, axisangles): - R = geom.get_rotation_matrix_from_axis_angle(aa) - geom.scale(scale).rotate(R, center=False) - vis.update_geometry(geom) - vis.poll_events() - vis.update_renderer() - time.sleep(0.05) - - for t in ts: - geom.translate(t) - vis.update_geometry(geom) - vis.poll_events() - vis.update_renderer() - time.sleep(0.05) - - for scale, aa, t in zip(scales, axisangles, ts): - R = geom.get_rotation_matrix_from_axis_angle(aa) - geom.scale(scale).translate(t).rotate(R, center=True) - vis.update_geometry(geom) - vis.poll_events() - vis.update_renderer() - time.sleep(0.05) - - -if __name__ == "__main__": - for geom in geometry_generator(): - animate(geom) diff --git a/examples/Python/Basic/visualization.ipynb b/examples/Python/Basic/visualization.ipynb new file mode 100644 index 00000000000..af3d3058324 --- /dev/null +++ b/examples/Python/Basic/visualization.ipynb @@ -0,0 +1,248 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Visualization\n", + "## Function draw_geometries\n", + "Open3D provides a convenient visualization function `draw_geometries` which takes a list of geometry objects (`PointCloud`, `TriangleMesh`, or `Image`), and renders them together. 
We have implemented many functions in the visualizer, such as rotation, translation, and scaling via mouse operations, changing rendering style, and screen capture. Press `h` inside the window to print out a comprehensive list of functions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Load a ply point cloud, print it, and render it\")\n", + "pcd = o3d.io.read_point_cloud(\"../../TestData/fragment.ply\")\n", + "o3d.visualization.draw_geometries([pcd], zoom=0.3412, \n", + " front=[0.4257, -0.2125, -0.8795],\n", + " lookat=[2.6172, 2.0475, 1.532],\n", + " up=[-0.0694, -0.9768, 0.2024])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```\n", + "-- Mouse view control --\n", + " Left button + drag : Rotate.\n", + " Ctrl + left button + drag : Translate.\n", + " Wheel button + drag : Translate.\n", + " Shift + left button + drag : Roll.\n", + " Wheel : Zoom in/out.\n", + "\n", + "-- Keyboard view control --\n", + " [/] : Increase/decrease field of view.\n", + " R : Reset view point.\n", + " Ctrl/Cmd + C : Copy current view status into the clipboard.\n", + " Ctrl/Cmd + V : Paste view status from clipboard.\n", + "\n", + "-- General control --\n", + " Q, Esc : Exit window.\n", + " H : Print help message.\n", + " P, PrtScn : Take a screen capture.\n", + " D : Take a depth capture.\n", + " O : Take a capture of current rendering settings.\n", + "```\n", + "\n", + "
\n", + " \n", + "**Note:** \n", + "\n", + "In some operating systems (e.g., OS X), the visualization window may not respond to keyboard input. This is usually because the console retains the input focus instead of passing it to the visualization window. Running with `pythonw` instead of `python` will resolve this issue.\n", + "\n", + "
\n", + "\n", + "
\n", + " \n", + "**Note:** \n", + "\n", + "In addition to `draw_geometries`, Open3D has a set of sibling functions with more advanced functionality. `draw_geometries_with_custom_animation` allows the programmer to define a custom view trajectory and play an animation in the GUI. `draw_geometries_with_animation_callback` and `draw_geometries_with_key_callback` accept Python callback functions as input. The callback function is called in an automatic animation loop, or upon a key press event. See [Customized visualization](../Advanced/customized_visualization.rst) for details.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Store view point\n", + "In the beginning, the point cloud is rendered upside down.\n", + "\n", + "![bad view](../../_static/Basic/visualization/badview.png)\n", + "\n", + "After adjusting view points using mouse left button + drag, we can reach a better view point.\n", + "\n", + "![color view](../../_static/Basic/visualization/color.png)\n", + "\n", + "To retain this view point, press ctrl+c. The view point will be translated into a json string stored in clipboard. When you move the camera to a different view, such as:\n", + "\n", + "![new view](../../_static/Basic/visualization/newview.png)\n", + "\n", + "You can get back to the original view by pressing ctrl+v.\n", + "\n", + "![color view](../../_static/Basic/visualization/color.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Rendering styles\n", + "Open3D `Visualizer` supports several rendering styles. For example, pressing l will switch between a Phong lighting and a simple color rendering. Pressing 2 shows points colored based on x-coordinate.\n", + "\n", + "![colormap jet](../../_static/Basic/visualization/colormap_jet.png)\n", + "\n", + "The color map can also be adjusted by, for example, pressing shift+4. This changes jet color map to hot color map.\n", + "\n", + "![colormap hot](../../_static/Basic/visualization/colormap_hot.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Geometry primitives\n", + "This code below generates a cubic, a sphere, and a cylinder using `create_mesh_cubic`, `create_mesh_sphere` and `create_mesh_cylinder`. The cubic is painted in red, sphere is painted in blue, and the cylinder is painted in green. Normals are computed for both meshes to support the Phong shading (see [Visualize 3D mesh](mesh.ipynb#visualize-a-3d-mesh) and [Surface normal estimation](mesh.ipynb#surface-normal-estimation)). 
We can even create a coordinate axis using `create_mesh_coordinate_frame`, with its origin point set at (-2, -2, -2). TODO links" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Let's define some primitives\")\n", + "mesh_box = o3d.geometry.TriangleMesh.create_box(width=1.0,\n", + " height=1.0,\n", + " depth=1.0)\n", + "mesh_box.compute_vertex_normals()\n", + "mesh_box.paint_uniform_color([0.9, 0.1, 0.1])\n", + "mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=1.0)\n", + "mesh_sphere.compute_vertex_normals()\n", + "mesh_sphere.paint_uniform_color([0.1, 0.1, 0.7])\n", + "mesh_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=0.3,\n", + " height=4.0)\n", + "mesh_cylinder.compute_vertex_normals()\n", + "mesh_cylinder.paint_uniform_color([0.1, 0.9, 0.1])\n", + "mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(\n", + " size=0.6, origin=[-2, -2, -2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`draw_geometries` takes a list of geometries and renders them all together. Alternatively, `TriangleMesh` supports a + operator to combine multiple meshes into one. We recommend the first approach since it supports a combination of different geometries (e.g., a mesh can be rendered in tandem with a point cloud)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"We draw a few primitives using collection.\")\n", + "o3d.visualization.draw_geometries(\n", + " [mesh_box, mesh_sphere, mesh_cylinder, mesh_frame])\n", + "\n", + "print(\"We draw a few primitives using + operator of mesh.\")\n", + "o3d.visualization.draw_geometries(\n", + " [mesh_box + mesh_sphere + mesh_cylinder + mesh_frame])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Draw line set\n", + "To draw lines, it is necessary to define `LineSet` and create a set of `points` and a set of edges. An edge is a pair of point indices. The above example creates custom points and edges (denoted as `lines`) to make a cubic. Color is optional - red color `[1,0,0]` is assigned to each edge in this example. This script visualizes the following cubic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Let's draw a cubic using o3d.geometry.LineSet.\")\n", + "points = [\n", + " [0, 0, 0],\n", + " [1, 0, 0],\n", + " [0, 1, 0],\n", + " [1, 1, 0],\n", + " [0, 0, 1],\n", + " [1, 0, 1],\n", + " [0, 1, 1],\n", + " [1, 1, 1],\n", + "]\n", + "lines = [\n", + " [0, 1],\n", + " [0, 2],\n", + " [1, 3],\n", + " [2, 3],\n", + " [4, 5],\n", + " [4, 6],\n", + " [5, 7],\n", + " [6, 7],\n", + " [0, 4],\n", + " [1, 5],\n", + " [2, 6],\n", + " [3, 7],\n", + "]\n", + "colors = [[1, 0, 0] for i in range(len(lines))]\n", + "line_set = o3d.geometry.LineSet(\n", + " points=o3d.utility.Vector3dVector(points),\n", + " lines=o3d.utility.Vector2iVector(lines),\n", + ")\n", + "line_set.colors = o3d.utility.Vector3dVector(colors)\n", + "o3d.visualization.draw_geometries([line_set], zoom=0.8)" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + 
"codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/visualization.py b/examples/Python/Basic/visualization.py deleted file mode 100644 index ab7273e7f6c..00000000000 --- a/examples/Python/Basic/visualization.py +++ /dev/null @@ -1,76 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/visualization.py - -import numpy as np -import open3d as o3d - -if __name__ == "__main__": - - print("Load a ply point cloud, print it, and render it") - pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") - o3d.visualization.draw_geometries([pcd]) - - print("Let's draw some primitives") - mesh_box = o3d.geometry.TriangleMesh.create_box(width=1.0, - height=1.0, - depth=1.0) - mesh_box.compute_vertex_normals() - mesh_box.paint_uniform_color([0.9, 0.1, 0.1]) - mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=1.0) - mesh_sphere.compute_vertex_normals() - mesh_sphere.paint_uniform_color([0.1, 0.1, 0.7]) - mesh_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=0.3, - height=4.0) - mesh_cylinder.compute_vertex_normals() - mesh_cylinder.paint_uniform_color([0.1, 0.9, 0.1]) - mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame( - size=0.6, origin=[-2, -2, -2]) - - print("We draw a few primitives using collection.") - o3d.visualization.draw_geometries( - [mesh_box, mesh_sphere, mesh_cylinder, mesh_frame]) - - print("We draw a few primitives using + operator of mesh.") - o3d.visualization.draw_geometries( - [mesh_box + mesh_sphere + mesh_cylinder + mesh_frame]) - - print("Let's draw a cubic using o3d.geometry.LineSet.") - points = [ - [0, 0, 0], - [1, 0, 0], - [0, 1, 0], - [1, 1, 0], - [0, 0, 1], - [1, 0, 1], 
- [0, 1, 1], - [1, 1, 1], - ] - lines = [ - [0, 1], - [0, 2], - [1, 3], - [2, 3], - [4, 5], - [4, 6], - [5, 7], - [6, 7], - [0, 4], - [1, 5], - [2, 6], - [3, 7], - ] - colors = [[1, 0, 0] for i in range(len(lines))] - line_set = o3d.geometry.LineSet( - points=o3d.utility.Vector3dVector(points), - lines=o3d.utility.Vector2iVector(lines), - ) - line_set.colors = o3d.utility.Vector3dVector(colors) - o3d.visualization.draw_geometries([line_set]) - - print("Let's draw a textured triangle mesh from obj file.") - textured_mesh = o3d.io.read_triangle_mesh("../../TestData/crate/crate.obj") - textured_mesh.compute_vertex_normals() - o3d.visualization.draw_geometries([textured_mesh]) diff --git a/examples/Python/Basic/working_with_numpy.ipynb b/examples/Python/Basic/working_with_numpy.ipynb new file mode 100644 index 00000000000..6b6469b3481 --- /dev/null +++ b/examples/Python/Basic/working_with_numpy.ipynb @@ -0,0 +1,120 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "nbsphinx": "hidden" + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "import sys\n", + "\n", + "# monkey patches visualization and provides helpers to load geometries\n", + "sys.path.append('..')\n", + "import open3d_tutorial as o3dtut\n", + "# change to True if you want to interact with the visualization windows\n", + "o3dtut.interactive = False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Working with NumPy\n", + "Data structure of Open3D is natively compatible with [NumPy](https://numpy.org/) buffer. The following tutorial generates a variant of sync function using NumPy and visualizes the function using Open3D.\n", + "\n", + "First, we generates a $nx3$ matrix `xyz`. Each column has $x$, $y$, $z$ value of a function $z = \\frac{sin(x^2 + y^2)}{x^2 + y^2}$. $z_\\text{norm}$ is the normalized map of $z$ for the [0,1] range." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# generate some neat n times 3 matrix using a variant of sinc function\n", + "x = np.linspace(-3, 3, 401)\n", + "mesh_x, mesh_y = np.meshgrid(x, x)\n", + "z = np.sinc((np.power(mesh_x, 2) + np.power(mesh_y, 2)))\n", + "z_norm = (z - z.min()) / (z.max() - z.min())\n", + "xyz = np.zeros((np.size(mesh_x), 3))\n", + "xyz[:, 0] = np.reshape(mesh_x, -1)\n", + "xyz[:, 1] = np.reshape(mesh_y, -1)\n", + "xyz[:, 2] = np.reshape(z_norm, -1)\n", + "print('xyz')\n", + "print(xyz)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## From NumPy to open3d.PointCloud\n", + "Open3D provides conversion from NumPy matrix to a vector of 3D vectors. By using `Vector3dVector`, a NumPy matrix can be directly assigned to `open3d.PointCloud.points`.\n", + "\n", + "In this manner, any similar data structure such as `open3d.PointCloud.colors` or `open3d.PointCloud.normals` can be assigned or modified using NumPy. The code below also saves the point cloud as a ply file for the next step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pass xyz to Open3D.o3d.geometry.PointCloud and visualize\n", + "pcd = o3d.geometry.PointCloud()\n", + "pcd.points = o3d.utility.Vector3dVector(xyz)\n", + "o3d.io.write_point_cloud(\"../../TestData/sync.ply\", pcd)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## From open3d.PointCloud to NumPy\n", + "As shown in this example, `pcd_load.points` of type `Vector3dVector` is converted into a NumPy array using `np.asarray`."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load saved point cloud and visualize it\n", + "pcd_load = o3d.io.read_point_cloud(\"../../TestData/sync.ply\")\n", + "\n", + "# convert Open3D.o3d.geometry.PointCloud to numpy array\n", + "xyz_load = np.asarray(pcd_load.points)\n", + "print('xyz_load')\n", + "print(xyz_load)\n", + "o3d.visualization.draw_geometries([pcd_load])" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/Python/Basic/working_with_numpy.py b/examples/Python/Basic/working_with_numpy.py deleted file mode 100644 index f7dcfc7d4c5..00000000000 --- a/examples/Python/Basic/working_with_numpy.py +++ /dev/null @@ -1,42 +0,0 @@ -# Open3D: www.open3d.org -# The MIT License (MIT) -# See license file or visit www.open3d.org for details - -# examples/Python/Basic/working_with_numpy.py - -import copy -import numpy as np -import open3d as o3d - -if __name__ == "__main__": - - # generate some neat n times 3 matrix using a variant of sync function - x = np.linspace(-3, 3, 401) - mesh_x, mesh_y = np.meshgrid(x, x) - z = np.sinc((np.power(mesh_x, 2) + np.power(mesh_y, 2))) - z_norm = (z - z.min()) / (z.max() - z.min()) - xyz = np.zeros((np.size(mesh_x), 3)) - xyz[:, 0] = np.reshape(mesh_x, -1) - xyz[:, 1] = np.reshape(mesh_y, -1) - xyz[:, 2] = np.reshape(z_norm, -1) - print('xyz') - print(xyz) - - # Pass xyz to Open3D.o3d.geometry.PointCloud and visualize - pcd = o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(xyz) - 
o3d.io.write_point_cloud("../../TestData/sync.ply", pcd) - - # Load saved point cloud and visualize it - pcd_load = o3d.io.read_point_cloud("../../TestData/sync.ply") - o3d.visualization.draw_geometries([pcd_load]) - - # convert Open3D.o3d.geometry.PointCloud to numpy array - xyz_load = np.asarray(pcd_load.points) - print('xyz_load') - print(xyz_load) - - # save z_norm as an image (change [0,1] range to [0,255] range with uint8 type) - img = o3d.geometry.Image((z_norm * 255).astype(np.uint8)) - o3d.io.write_image("../../TestData/sync.png", img) - o3d.visualization.draw_geometries([img]) diff --git a/examples/Python/Misc/meshes.py b/examples/Python/Misc/meshes.py index 8b3bc3ea4c0..4a32703abcd 100644 --- a/examples/Python/Misc/meshes.py +++ b/examples/Python/Misc/meshes.py @@ -62,8 +62,10 @@ def plane(height=0.2, width=1): def non_manifold_edge(): - verts = np.array([[-1, 0, 0], [0, 1, 0], [1, 0, 0], [0, -1, 0], [0, 0, 1]], - dtype=np.float64) + verts = np.array( + [[-1, 0, 0], [0, 1, 0], [1, 0, 0], [0, -1, 0], [0, 0, 1]], + dtype=np.float64, + ) triangles = np.array([[0, 1, 3], [1, 2, 3], [1, 3, 4]]) mesh = o3d.geometry.TriangleMesh() mesh.vertices = o3d.utility.Vector3dVector(verts) @@ -165,8 +167,12 @@ def bunny(): with tarfile.open(bunny_path + ".tar.gz") as tar: tar.extractall(path=os.path.dirname(bunny_path)) shutil.move( - os.path.join(os.path.dirname(bunny_path), "bunny", "reconstruction", - "bun_zipper.ply"), + os.path.join( + os.path.dirname(bunny_path), + "bunny", + "reconstruction", + "bun_zipper.ply", + ), bunny_path, ) os.remove(bunny_path + ".tar.gz") diff --git a/examples/Python/jupyter_run_all.py b/examples/Python/jupyter_run_all.py new file mode 100644 index 00000000000..482d71526bc --- /dev/null +++ b/examples/Python/jupyter_run_all.py @@ -0,0 +1,21 @@ +import nbformat +import nbconvert +from pathlib import Path + +if __name__ == "__main__": + file_dir = Path(__file__).absolute().parent + nb_paths = sorted((file_dir / "Basic").glob("*.ipynb")) + 
nb_paths += sorted((file_dir / "Advanced").glob("*.ipynb")) + for nb_path in nb_paths: + print(f"[Executing notebook {nb_path.name}]") + + with open(nb_path) as f: + nb = nbformat.read(f, as_version=4) + ep = nbconvert.preprocessors.ExecutePreprocessor(timeout=6000) + try: + ep.preprocess(nb, {"metadata": {"path": nb_path.parent}}) + except nbconvert.preprocessors.execute.CellExecutionError: + print(f"Execution of {nb_path.name} failed") + + with open(nb_path, "w", encoding="utf-8") as f: + nbformat.write(nb, f) diff --git a/examples/Python/jupyter_strip_output.py b/examples/Python/jupyter_strip_output.py new file mode 100644 index 00000000000..3aa786edc87 --- /dev/null +++ b/examples/Python/jupyter_strip_output.py @@ -0,0 +1,21 @@ +import nbformat +import nbconvert +from pathlib import Path + +if __name__ == "__main__": + file_dir = Path(__file__).absolute().parent + nb_paths = sorted((file_dir / "Basic").glob("*.ipynb")) + nb_paths += sorted((file_dir / "Advanced").glob("*.ipynb")) + for nb_path in nb_paths: + print(f"Clean {nb_path.name}") + + with open(nb_path) as f: + nb = nbformat.read(f, as_version=4) + ep = nbconvert.preprocessors.ClearOutputPreprocessor(timeout=6000) + try: + ep.preprocess(nb, {"metadata": {"path": nb_path.parent}}) + except nbconvert.preprocessors.execute.CellExecutionError: + print(f"Cleaning of {nb_path.name} failed") + + with open(nb_path, "w", encoding="utf-8") as f: + nbformat.write(nb, f) diff --git a/examples/Python/open3d_tutorial.py b/examples/Python/open3d_tutorial.py new file mode 100644 index 00000000000..6f51e79a561 --- /dev/null +++ b/examples/Python/open3d_tutorial.py @@ -0,0 +1,221 @@ +# Helpers and monkey patches for ipynb tutorials + +import open3d as o3d +import numpy as np +import PIL.Image +import IPython.display +import os +import urllib +import tarfile +import gzip +import zipfile +import shutil + +interactive = True + + +def jupyter_draw_geometries( + geoms, + window_name="Open3D", + width=1920, + height=1080, + 
left=50, + top=50, + point_show_normal=False, + mesh_show_wireframe=False, + mesh_show_back_face=False, + lookat=None, + up=None, + front=None, + zoom=None, +): + vis = o3d.visualization.Visualizer() + vis.create_window( + window_name=window_name, + width=width, + height=height, + left=left, + top=top, + visible=True, # If false, capture_screen_float_buffer() won't work. + ) + vis.get_render_option().point_show_normal = point_show_normal + vis.get_render_option().mesh_show_wireframe = mesh_show_wireframe + vis.get_render_option().mesh_show_back_face = mesh_show_back_face + for geom in geoms: + vis.add_geometry(geom) + if lookat is not None: + vis.get_view_control().set_lookat(lookat) + if up is not None: + vis.get_view_control().set_up(up) + if front is not None: + vis.get_view_control().set_front(front) + if zoom is not None: + vis.get_view_control().set_zoom(zoom) + if interactive: + vis.run() + else: + for geom in geoms: + vis.update_geometry(geom) + vis.poll_events() + vis.update_renderer() + im = vis.capture_screen_float_buffer() + vis.destroy_window() + im = (255 * np.asarray(im)).astype(np.uint8) + IPython.display.display(PIL.Image.fromarray(im, "RGB")) + + +o3d.visualization.draw_geometries = jupyter_draw_geometries + + +def edges_to_lineset(mesh, edges, color): + ls = o3d.geometry.LineSet() + ls.points = mesh.vertices + ls.lines = edges + colors = np.empty((np.asarray(edges).shape[0], 3)) + colors[:] = color + ls.colors = o3d.utility.Vector3dVector(colors) + return ls + + +def _relative_path(path): + script_path = os.path.realpath(__file__) + script_dir = os.path.dirname(script_path) + return os.path.join(script_dir, path) + + +def download_fountain_dataset(): + fountain_path = _relative_path("../TestData/fountain_small") + fountain_zip_path = _relative_path("../TestData/fountain.zip") + if not os.path.exists(fountain_path): + print("downloading fountain dataset") + url = "https://storage.googleapis.com/isl-datasets/open3d-dev/fountain.zip" + 
urllib.request.urlretrieve(url, fountain_zip_path) + print("extract fountain dataset") + with zipfile.ZipFile(fountain_zip_path, "r") as zip_ref: + zip_ref.extractall(os.path.dirname(fountain_path)) + os.remove(fountain_zip_path) + return fountain_path + + +def get_non_manifold_edge_mesh(): + verts = np.array( + [[-1, 0, 0], [0, 1, 0], [1, 0, 0], [0, -1, 0], [0, 0, 1]], + dtype=np.float64, + ) + triangles = np.array([[0, 1, 3], [1, 2, 3], [1, 3, 4]]) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(verts) + mesh.triangles = o3d.utility.Vector3iVector(triangles) + mesh.compute_vertex_normals() + mesh.rotate(mesh.get_rotation_matrix_from_xyz((np.pi / 4, 0, np.pi / 4))) + return mesh + + +def get_non_manifold_vertex_mesh(): + verts = np.array( + [ + [-1, 0, -1], + [1, 0, -1], + [0, 1, -1], + [0, 0, 0], + [-1, 0, 1], + [1, 0, 1], + [0, 1, 1], + ], + dtype=np.float64, + ) + triangles = np.array([ + [0, 1, 2], + [0, 1, 3], + [1, 2, 3], + [2, 0, 3], + [4, 5, 6], + [4, 5, 3], + [5, 6, 3], + [4, 6, 3], + ]) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(verts) + mesh.triangles = o3d.utility.Vector3iVector(triangles) + mesh.compute_vertex_normals() + mesh.rotate(mesh.get_rotation_matrix_from_xyz((np.pi / 4, 0, np.pi / 4))) + return mesh + + +def get_open_box_mesh(): + mesh = o3d.geometry.TriangleMesh.create_box() + mesh.triangles = o3d.utility.Vector3iVector(np.asarray(mesh.triangles)[:-2]) + mesh.compute_vertex_normals() + mesh.rotate( + mesh.get_rotation_matrix_from_xyz((0.8 * np.pi, 0, 0.66 * np.pi))) + return mesh + + +def get_intersecting_boxes_mesh(): + mesh0 = o3d.geometry.TriangleMesh.create_box() + T = np.eye(4) + T[:, 3] += (0.5, 0.5, 0.5, 0) + mesh1 = o3d.geometry.TriangleMesh.create_box() + mesh1.transform(T) + mesh = mesh0 + mesh1 + mesh.compute_vertex_normals() + mesh.rotate(mesh.get_rotation_matrix_from_xyz( + (0.7 * np.pi, 0, 0.6 * np.pi))) + return mesh + + +def get_armadillo_mesh(): + 
armadillo_path = _relative_path("../TestData/Armadillo.ply") + if not os.path.exists(armadillo_path): + print("downloading armadillo mesh") + url = "http://graphics.stanford.edu/pub/3Dscanrep/armadillo/Armadillo.ply.gz" + urllib.request.urlretrieve(url, armadillo_path + ".gz") + print("extract armadillo mesh") + with gzip.open(armadillo_path + ".gz", "rb") as fin: + with open(armadillo_path, "wb") as fout: + shutil.copyfileobj(fin, fout) + os.remove(armadillo_path + ".gz") + mesh = o3d.io.read_triangle_mesh(armadillo_path) + mesh.compute_vertex_normals() + return mesh + + +def get_bunny_mesh(): + bunny_path = _relative_path("../TestData/Bunny.ply") + if not os.path.exists(bunny_path): + print("downloading bunny mesh") + url = "http://graphics.stanford.edu/pub/3Dscanrep/bunny.tar.gz" + urllib.request.urlretrieve(url, bunny_path + ".tar.gz") + print("extract bunny mesh") + with tarfile.open(bunny_path + ".tar.gz") as tar: + tar.extractall(path=os.path.dirname(bunny_path)) + shutil.move( + os.path.join( + os.path.dirname(bunny_path), + "bunny", + "reconstruction", + "bun_zipper.ply", + ), + bunny_path, + ) + os.remove(bunny_path + ".tar.gz") + shutil.rmtree(os.path.join(os.path.dirname(bunny_path), "bunny")) + mesh = o3d.io.read_triangle_mesh(bunny_path) + mesh.compute_vertex_normals() + return mesh + + +def get_knot_mesh(): + mesh = o3d.io.read_triangle_mesh(_relative_path("../TestData/knot.ply")) + mesh.compute_vertex_normals() + return mesh + + +def get_eagle_pcd(): + path = _relative_path("../TestData/eagle.ply") + if not os.path.exists(path): + print("downloading eagle pcl") + url = "http://www.cs.jhu.edu/~misha/Code/PoissonRecon/eagle.points.ply" + urllib.request.urlretrieve(url, path) + pcd = o3d.io.read_point_cloud(path) + return pcd diff --git a/examples/TestData/my_points.txt b/examples/TestData/my_points.txt new file mode 100644 index 00000000000..000f4bd9cbf --- /dev/null +++ b/examples/TestData/my_points.txt @@ -0,0 +1,4 @@ +0.0000000000 0.0000000000 
0.0000000000 +1.0000000000 0.0000000000 0.0000000000 +0.0000000000 1.0000000000 0.0000000000 +0.0000000000 0.0000000000 1.0000000000 diff --git a/src/Open3D/Geometry/MeshBase.h b/src/Open3D/Geometry/MeshBase.h index 1b570c5b10a..f8d9dbbfa27 100644 --- a/src/Open3D/Geometry/MeshBase.h +++ b/src/Open3D/Geometry/MeshBase.h @@ -69,6 +69,12 @@ class MeshBase : public Geometry3D { /// \param Vertex indicates that only the vertex positions are filtered. enum class FilterScope { All, Color, Normal, Vertex }; + /// Energy model that is minimized in the DeformAsRigidAsPossible method. + /// \param Spokes is the original energy as formulated in + /// Sorkine and Alexa, "As-Rigid-As-Possible Surface Modeling", 2007. + /// \param Smoothed adds a rotation smoothing term to the rotations. + enum class DeformAsRigidAsPossibleEnergy { Spokes, Smoothed }; + /// \brief Default Constructor. MeshBase() : Geometry3D(Geometry::GeometryType::MeshBase) {} ~MeshBase() override {} diff --git a/src/Open3D/Geometry/TriangleMesh.h b/src/Open3D/Geometry/TriangleMesh.h index 95caf336312..0263b620daf 100644 --- a/src/Open3D/Geometry/TriangleMesh.h +++ b/src/Open3D/Geometry/TriangleMesh.h @@ -194,7 +194,7 @@ class TriangleMesh : public MeshBase { /// \brief Function to smooth triangle mesh using Laplacian. /// - /// $v_o = v_i \cdot \lambda (sum_{n \in N} w_n v_n - v_i)$, + /// $v_o = v_i \cdot \lambda (\sum_{n \in N} w_n v_n - v_i)$, /// with $v_i$ being the input value, $v_o$ the output value, $N$ is the /// set of adjacent neighbours, $w_n$ is the weighting of the neighbour /// based on the inverse distance (closer neighbours have higher weight), @@ -267,7 +267,7 @@ class TriangleMesh : public MeshBase { bool IsIntersecting(const TriangleMesh &other) const; /// Function that tests if the given triangle mesh is orientable, i.e. - /// the triangles can oriented in such a way that all normals point + /// the triangles can be oriented in such a way that all normals point /// towards the outside. 
bool IsOrientable() const; @@ -485,11 +485,17 @@ class TriangleMesh : public MeshBase { /// \param constraint_vertex_positions Vertex positions used for the /// constraints. /// \param max_iter maximum number of iterations to minimize energy - /// functional. \return The deformed TriangleMesh + /// functional. + /// \param energy energy model that should be optimized + /// \param smoothed_alpha alpha parameter of the smoothed ARAP model + /// \return The deformed TriangleMesh std::shared_ptr DeformAsRigidAsPossible( const std::vector &constraint_vertex_indices, const std::vector &constraint_vertex_positions, - size_t max_iter) const; + size_t max_iter, + DeformAsRigidAsPossibleEnergy energy = + DeformAsRigidAsPossibleEnergy::Spokes, + double smoothed_alpha = 0.01) const; /// \brief Alpha shapes are a generalization of the convex hull. With /// decreasing alpha value the shape schrinks and creates cavities. diff --git a/src/Open3D/Geometry/TriangleMeshDeformation.cpp b/src/Open3D/Geometry/TriangleMeshDeformation.cpp index 819b94fe5ce..dd37f256f65 100644 --- a/src/Open3D/Geometry/TriangleMeshDeformation.cpp +++ b/src/Open3D/Geometry/TriangleMeshDeformation.cpp @@ -38,7 +38,9 @@ namespace geometry { std::shared_ptr TriangleMesh::DeformAsRigidAsPossible( const std::vector &constraint_vertex_indices, const std::vector &constraint_vertex_positions, - size_t max_iter) const { + size_t max_iter, + DeformAsRigidAsPossibleEnergy energy_model, + double smoothed_alpha) const { auto prime = std::make_shared(); prime->vertices_ = this->vertices_; prime->triangles_ = this->triangles_; @@ -48,7 +50,6 @@ std::shared_ptr TriangleMesh::DeformAsRigidAsPossible( auto edges_to_vertices = prime->GetEdgeToVerticesMap(); auto edge_weights = prime->ComputeEdgeWeightsCot(edges_to_vertices, /*min_weight=*/0); - std::vector Rs(vertices_.size()); utility::LogDebug("[DeformAsRigidAsPossible] done setting up S'"); std::unordered_map constraints; @@ -59,6 +60,16 @@ std::shared_ptr 
TriangleMesh::DeformAsRigidAsPossible( constraint_vertex_positions[idx]; } + double surface_area = -1; + // std::vector Rs(vertices_.size(), + // Eigen::Matrix3d::Identity()); + std::vector Rs(vertices_.size()); + std::vector Rs_old; + if (energy_model == DeformAsRigidAsPossibleEnergy::Smoothed) { + surface_area = prime->GetSurfaceArea(); + Rs_old.resize(vertices_.size()); + } + // Build system matrix L and its solver utility::LogDebug("[DeformAsRigidAsPossible] setting up system matrix L"); std::vector> triplets; @@ -84,7 +95,6 @@ std::shared_ptr TriangleMesh::DeformAsRigidAsPossible( utility::LogDebug("[DeformAsRigidAsPossible] setting up sparse solver"); Eigen::SparseLU> solver; - // Eigen::SuperLU> solver; solver.analyzePattern(L); solver.factorize(L); if (solver.info() != Eigen::Success) { @@ -99,17 +109,32 @@ std::shared_ptr TriangleMesh::DeformAsRigidAsPossible( Eigen::VectorXd(vertices_.size()), Eigen::VectorXd(vertices_.size())}; for (size_t iter = 0; iter < max_iter; ++iter) { + if (energy_model == DeformAsRigidAsPossibleEnergy::Smoothed) { + std::swap(Rs, Rs_old); + } + #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif for (int i = 0; i < int(vertices_.size()); ++i) { // Update rotations Eigen::Matrix3d S = Eigen::Matrix3d::Zero(); + Eigen::Matrix3d R = Eigen::Matrix3d::Zero(); + int n_nbs = 0; for (int j : prime->adjacency_list_[i]) { Eigen::Vector3d e0 = vertices_[i] - vertices_[j]; Eigen::Vector3d e1 = prime->vertices_[i] - prime->vertices_[j]; double w = edge_weights[GetOrderedEdge(i, j)]; S += w * (e0 * e1.transpose()); + if (energy_model == DeformAsRigidAsPossibleEnergy::Smoothed) { + R += Rs_old[j]; + } + n_nbs++; + } + if (energy_model == DeformAsRigidAsPossibleEnergy::Smoothed && + iter > 0 && n_nbs > 0) { + S = 2 * S + + (4 * smoothed_alpha * surface_area / n_nbs) * R.transpose(); } Eigen::JacobiSVD svd( S, Eigen::ComputeFullU | Eigen::ComputeFullV); @@ -161,6 +186,7 @@ std::shared_ptr TriangleMesh::DeformAsRigidAsPossible( // 
Compute energy and log double energy = 0; + double reg = 0; for (int i = 0; i < int(vertices_.size()); ++i) { for (int j : prime->adjacency_list_[i]) { double w = edge_weights[GetOrderedEdge(i, j)]; @@ -168,8 +194,14 @@ std::shared_ptr TriangleMesh::DeformAsRigidAsPossible( Eigen::Vector3d e1 = prime->vertices_[i] - prime->vertices_[j]; Eigen::Vector3d diff = e1 - Rs[i] * e0; energy += w * diff.squaredNorm(); + if (energy_model == DeformAsRigidAsPossibleEnergy::Smoothed) { + reg += (Rs[i] - Rs[j]).squaredNorm(); + } } } + if (energy_model == DeformAsRigidAsPossibleEnergy::Smoothed) { + energy = energy + smoothed_alpha * surface_area * reg; + } utility::LogDebug("[DeformAsRigidAsPossible] iter={}, energy={:e}", iter, energy); } diff --git a/src/Open3D/Geometry/TriangleMeshFactory.cpp b/src/Open3D/Geometry/TriangleMeshFactory.cpp index 22846836d2e..75729fbb037 100644 --- a/src/Open3D/Geometry/TriangleMeshFactory.cpp +++ b/src/Open3D/Geometry/TriangleMeshFactory.cpp @@ -433,9 +433,7 @@ std::shared_ptr TriangleMesh::CreateCoordinateFrame( mesh_arrow->Transform(transformation); *mesh_frame += *mesh_arrow; - transformation = Eigen::Matrix4d::Identity(); - transformation.block<3, 1>(0, 3) = origin; - mesh_frame->Transform(transformation); + mesh_frame->Translate(origin, false); return mesh_frame; } diff --git a/src/Open3D/Utility/Console.h b/src/Open3D/Utility/Console.h index 4a289420aed..57d61f4287f 100644 --- a/src/Open3D/Utility/Console.h +++ b/src/Open3D/Utility/Console.h @@ -92,27 +92,26 @@ class Logger { void VWarning(const char *format, fmt::format_args args) const { if (verbosity_level_ >= VerbosityLevel::Warning) { - ChangeConsoleColor(TextColor::Yellow, 1); - fmt::print("[Open3D WARNING] "); - fmt::vprint(format, args); - fmt::print("\n"); - ResetConsoleColor(); + std::string err_msg = fmt::vformat(format, args); + err_msg = fmt::format("[Open3D WARNING] {}", err_msg); + err_msg = ColorString(err_msg, TextColor::Yellow, 1); + print_fcn_(err_msg); } } void 
VInfo(const char *format, fmt::format_args args) const { if (verbosity_level_ >= VerbosityLevel::Info) { - fmt::print("[Open3D INFO] "); - fmt::vprint(format, args); - fmt::print("\n"); + std::string err_msg = fmt::vformat(format, args); + err_msg = fmt::format("[Open3D INFO] {}", err_msg); + print_fcn_(err_msg); } } void VDebug(const char *format, fmt::format_args args) const { if (verbosity_level_ >= VerbosityLevel::Debug) { - fmt::print("[Open3D DEBUG] "); - fmt::vprint(format, args); - fmt::print("\n"); + std::string err_msg = fmt::vformat(format, args); + err_msg = fmt::format("[Open3D DEBUG] {}", err_msg); + print_fcn_(err_msg); } } @@ -136,43 +135,6 @@ class Logger { VDebug(format, fmt::make_format_args(args...)); } - template - void Errorf[[noreturn]](const char *format, const Args &... args) const { - std::string err_msg = fmt::sprintf(format, args...); - err_msg = fmt::format("[Open3D Error] {}", err_msg); - err_msg = ColorString(err_msg, TextColor::Red, 1); - throw std::runtime_error(err_msg); - } - - template - void Warningf(const char *format, const Args &... args) const { - if (verbosity_level_ >= VerbosityLevel::Warning) { - ChangeConsoleColor(TextColor::Yellow, 1); - fmt::print("[Open3D WARNING] "); - fmt::printf(format, args...); - ResetConsoleColor(); - fmt::print("\n"); - } - } - - template - void Infof(const char *format, const Args &... args) const { - if (verbosity_level_ >= VerbosityLevel::Info) { - fmt::print("[Open3D INFO] "); - fmt::printf(format, args...); - fmt::print("\n"); - } - } - - template - void Debugf(const char *format, const Args &... args) const { - if (verbosity_level_ >= VerbosityLevel::Debug) { - fmt::print("[Open3D DEBUG] "); - fmt::printf(format, args...); - fmt::print("\n"); - } - } - protected: /// Internal function to change text color for the console /// Note there is no security check for parameters. 
@@ -187,6 +149,11 @@ class Logger { public: VerbosityLevel verbosity_level_; + std::function print_fcn_ = + [](const std::string &msg) { + fmt::print(msg); + fmt::print("\n"); + }; }; /// Set global verbosity level of Open3D @@ -222,25 +189,21 @@ inline void LogDebug(const char *format, const Args &... args) { Logger::i().VDebug(format, fmt::make_format_args(args...)); } -template -inline void LogErrorf[[noreturn]](const char *format, const Args &... args) { - Logger::i().Errorf(format, args...); -} +class VerbosityContextManager { +public: + VerbosityContextManager(VerbosityLevel level) : level_(level) {} -template -inline void LogWarningf(const char *format, const Args &... args) { - Logger::i().Warningf(format, args...); -} + void enter() { + level_backup_ = Logger::i().verbosity_level_; + Logger::i().verbosity_level_ = level_; + } -template -inline void LogInfof(const char *format, const Args &... args) { - Logger::i().Infof(format, args...); -} + void exit() { Logger::i().verbosity_level_ = level_backup_; } -template -inline void LogDebugf(const char *format, const Args &... 
args) { - Logger::i().Debugf(format, args...); -} +private: + VerbosityLevel level_; + VerbosityLevel level_backup_; +}; class ConsoleProgressBar { public: diff --git a/src/Open3D/Visualization/Utility/DrawGeometry.cpp b/src/Open3D/Visualization/Utility/DrawGeometry.cpp index 4f12cbace0f..5e9e8cb7a27 100644 --- a/src/Open3D/Visualization/Utility/DrawGeometry.cpp +++ b/src/Open3D/Visualization/Utility/DrawGeometry.cpp @@ -46,7 +46,9 @@ bool DrawGeometries(const std::vector> int height /* = 480*/, int left /* = 50*/, int top /* = 50*/, - bool point_show_normal /* = false */) { + bool point_show_normal /* = false */, + bool mesh_show_wireframe /* = false */, + bool mesh_show_back_face /* = false */) { Visualizer visualizer; if (visualizer.CreateVisualizerWindow(window_name, width, height, left, top) == false) { @@ -54,6 +56,8 @@ bool DrawGeometries(const std::vector> return false; } visualizer.GetRenderOption().point_show_normal_ = point_show_normal; + visualizer.GetRenderOption().mesh_show_wireframe_ = mesh_show_wireframe; + visualizer.GetRenderOption().mesh_show_back_face_ = mesh_show_back_face; for (const auto &geometry_ptr : geometry_ptrs) { if (visualizer.AddGeometry(geometry_ptr) == false) { utility::LogWarning("[DrawGeometries] Failed adding geometry."); diff --git a/src/Open3D/Visualization/Utility/DrawGeometry.h b/src/Open3D/Visualization/Utility/DrawGeometry.h index 1a399051751..b2dce9efc7b 100644 --- a/src/Open3D/Visualization/Utility/DrawGeometry.h +++ b/src/Open3D/Visualization/Utility/DrawGeometry.h @@ -53,6 +53,9 @@ class Visualizer; /// \param left margin of the visualization window. /// \param top The top margin of the visualization window. /// \param point_show_normal visualize point normals if set to true. +/// \param mesh_show_wireframe visualize mesh wireframe if set to true. +/// \param mesh_show_back_face visualize also the back face of the mesh +/// triangles. 
bool DrawGeometries(const std::vector> &geometry_ptrs, const std::string &window_name = "Open3D", @@ -60,7 +63,9 @@ bool DrawGeometries(const std::vector> int height = 480, int left = 50, int top = 50, - bool point_show_normal = false); + bool point_show_normal = false, + bool mesh_show_wireframe = false, + bool mesh_show_back_face = false); /// \brief Function to draw a list of geometry objects with a GUI that /// supports animation. diff --git a/src/Open3D/Visualization/Visualizer/ViewControl.cpp b/src/Open3D/Visualization/Visualizer/ViewControl.cpp index 8e416ec227b..a2112d03d49 100644 --- a/src/Open3D/Visualization/Visualizer/ViewControl.cpp +++ b/src/Open3D/Visualization/Visualizer/ViewControl.cpp @@ -123,6 +123,26 @@ bool ViewControl::ConvertFromViewParameters(const ViewParameters &status) { return true; } +void ViewControl::SetLookat(const Eigen::Vector3d &lookat) { + lookat_ = lookat; + SetProjectionParameters(); +} + +void ViewControl::SetUp(const Eigen::Vector3d &up) { + up_ = up; + SetProjectionParameters(); +} + +void ViewControl::SetFront(const Eigen::Vector3d &front) { + front_ = front; + SetProjectionParameters(); +} + +void ViewControl::SetZoom(const double zoom) { + zoom_ = zoom; + SetProjectionParameters(); +} + bool ViewControl::ConvertToPinholeCameraParameters( camera::PinholeCameraParameters ¶meters) { if (window_height_ <= 0 || window_width_ <= 0) { diff --git a/src/Open3D/Visualization/Visualizer/ViewControl.h b/src/Open3D/Visualization/Visualizer/ViewControl.h index 5eda8623824..68cd0244ee6 100644 --- a/src/Open3D/Visualization/Visualizer/ViewControl.h +++ b/src/Open3D/Visualization/Visualizer/ViewControl.h @@ -70,6 +70,11 @@ class ViewControl { bool ConvertToViewParameters(ViewParameters &status) const; bool ConvertFromViewParameters(const ViewParameters &status); + void SetLookat(const Eigen::Vector3d &lookat); + void SetUp(const Eigen::Vector3d &up); + void SetFront(const Eigen::Vector3d &front); + void SetZoom(const double zoom); + /// 
Function to get equivalent pinhole camera parameters (does not support /// orthogonal since it is not a real camera view). /// diff --git a/src/Python/open3d/__init__.py b/src/Python/open3d/__init__.py index 29579f2b56d..f522627647b 100644 --- a/src/Python/open3d/__init__.py +++ b/src/Python/open3d/__init__.py @@ -32,17 +32,19 @@ except: pass -from .open3d import * # py2 py3 compatible +from .open3d import * # py2 py3 compatible -__version__ = '@PROJECT_VERSION@' +__version__ = "@PROJECT_VERSION@" if "@ENABLE_JUPYTER@" == "ON": from .j_visualizer import * def _jupyter_nbextension_paths(): - return [{ - 'section': 'notebook', - 'src': 'static', - 'dest': 'open3d', - 'require': 'open3d/extension' - }] + return [ + { + "section": "notebook", + "src": "static", + "dest": "open3d", + "require": "open3d/extension", + } + ] diff --git a/src/Python/open3d_pybind/geometry/meshbase.cpp b/src/Python/open3d_pybind/geometry/meshbase.cpp index fd720a71aa1..52a51d0090c 100644 --- a/src/Python/open3d_pybind/geometry/meshbase.cpp +++ b/src/Python/open3d_pybind/geometry/meshbase.cpp @@ -42,6 +42,7 @@ void pybind_meshbase(py::module &m) { "may also contain vertex normals and vertex colors."); py::detail::bind_default_constructor(meshbase); py::detail::bind_copy_functions(meshbase); + py::enum_( m, "SimplificationContraction") .value("Average", @@ -52,6 +53,7 @@ void pybind_meshbase(py::module &m) { "The vertex positions are computed by minimizing the " "distance to the adjacent triangle planes.") .export_values(); + py::enum_(m, "FilterScope") .value("All", geometry::MeshBase::FilterScope::All, "All properties (color, normal, vertex position) are " @@ -63,6 +65,18 @@ void pybind_meshbase(py::module &m) { .value("Vertex", geometry::MeshBase::FilterScope::Vertex, "Only the vertex positions are filtered.") .export_values(); + + py::enum_( + m, "DeformAsRigidAsPossibleEnergy") + .value("Spokes", + geometry::MeshBase::DeformAsRigidAsPossibleEnergy::Spokes, + "is the original energy as 
formulated in Sorkine and Alexa, " + "\"As-Rigid-As-Possible Surface Modeling\", 2007.") + .value("Smoothed", + geometry::MeshBase::DeformAsRigidAsPossibleEnergy::Smoothed, + "adds a rotation smoothing term to the rotations.") + .export_values(); + meshbase.def("__repr__", [](const geometry::MeshBase &mesh) { return std::string("geometry::MeshBase with ") + diff --git a/src/Python/open3d_pybind/geometry/trianglemesh.cpp b/src/Python/open3d_pybind/geometry/trianglemesh.cpp index b1b20c8546f..5a137627291 100644 --- a/src/Python/open3d_pybind/geometry/trianglemesh.cpp +++ b/src/Python/open3d_pybind/geometry/trianglemesh.cpp @@ -322,7 +322,10 @@ void pybind_trianglemesh(py::module &m) { "and Alexa, " "'As-Rigid-As-Possible Surface Modeling', 2007", "constraint_vertex_indices"_a, "constraint_vertex_positions"_a, - "max_iter"_a) + "max_iter"_a, + "energy"_a = geometry::MeshBase:: + DeformAsRigidAsPossibleEnergy::Spokes, + "smoothed_alpha"_a = 0.01) .def_static("create_from_point_cloud_alpha_shape", [](const geometry::PointCloud &pcd, double alpha) { return geometry::TriangleMesh:: @@ -648,7 +651,12 @@ void pybind_trianglemesh(py::module &m) { {"constraint_vertex_positions", "Vertex positions used for the constraints."}, {"max_iter", - "Maximum number of iterations to minimize energy functional."}}); + "Maximum number of iterations to minimize energy functional."}, + {"energy", + "Energy model that is minimized in the deformation process"}, + {"smoothed_alpha", + "trade-off parameter for the smoothed energy functional for the " + "regularization term."}}); docstring::ClassMethodDocInject( m, "TriangleMesh", "create_from_point_cloud_alpha_shape", {{"pcd", diff --git a/src/Python/open3d_pybind/open3d_pybind.cpp b/src/Python/open3d_pybind/open3d_pybind.cpp index 01f23126bc1..454aaea8541 100644 --- a/src/Python/open3d_pybind/open3d_pybind.cpp +++ b/src/Python/open3d_pybind/open3d_pybind.cpp @@ -35,7 +35,13 @@ #include "open3d_pybind/utility/utility.h" #include 
"open3d_pybind/visualization/visualization.h" +#include "Open3D/Utility/Console.h" + PYBIND11_MODULE(open3d, m) { + open3d::utility::Logger::i().print_fcn_ = [](const std::string& msg) { + py::print(msg); + }; + m.doc() = "Python binding of Open3D"; // Check open3d CXX11_ABI with diff --git a/src/Python/open3d_pybind/utility/console.cpp b/src/Python/open3d_pybind/utility/console.cpp index 265cd011e8d..4880281c3e8 100644 --- a/src/Python/open3d_pybind/utility/console.cpp +++ b/src/Python/open3d_pybind/utility/console.cpp @@ -31,7 +31,7 @@ using namespace open3d; -void pybind_console(py::module &m) { +void pybind_console(py::module& m) { py::enum_ vl(m, "VerbosityLevel", py::arithmetic(), "VerbosityLevel"); vl.value("Error", utility::VerbosityLevel::Error) @@ -57,4 +57,20 @@ m.def("get_verbosity_level", &utility::GetVerbosityLevel, "Get global verbosity level of Open3D"); docstring::FunctionDocInject(m, "get_verbosity_level"); + + py::class_(m, "VerbosityContextManager", + "A context manager to " + "temporarily change the " + "verbosity level of Open3D") + .def(py::init(), + "Create a VerbosityContextManager with a given VerbosityLevel", + "level"_a) + .def("__enter__", + [&](utility::VerbosityContextManager& cm) { cm.enter(); }, + "Enter the context manager") + .def("__exit__", + [&](utility::VerbosityContextManager& cm, + pybind11::object exc_type, pybind11::object exc_value, + pybind11::object traceback) { cm.exit(); }, + "Exit the context manager"); } diff --git a/src/Python/open3d_pybind/visualization/utility.cpp b/src/Python/open3d_pybind/visualization/utility.cpp index e4ffbada7df..91a3a949995 100644 --- a/src/Python/open3d_pybind/visualization/utility.cpp +++ b/src/Python/open3d_pybind/visualization/utility.cpp @@ -105,6 +105,10 @@ static const std::unordered_map {"width", "The width of the visualization window."}, {"point_show_normal", "Visualize point normals if set to true."}, + {"mesh_show_wireframe", + "Visualize mesh 
wireframe if set to true."}, + {"mesh_show_back_face", + "Visualize also the back face of the mesh triangles."}, {"window_name", "The displayed title of the visualization window."}}; @@ -113,18 +117,21 @@ void pybind_visualization_utility_methods(py::module &m) { [](const std::vector> &geometry_ptrs, const std::string &window_name, int width, int height, int left, - int top, bool point_show_normal) { + int top, bool point_show_normal, bool mesh_show_wireframe, + bool mesh_show_back_face) { std::string current_dir = utility::filesystem::GetWorkingDirectory(); - visualization::DrawGeometries(geometry_ptrs, window_name, width, - height, left, top, - point_show_normal); + visualization::DrawGeometries( + geometry_ptrs, window_name, width, height, left, top, + point_show_normal, mesh_show_wireframe, + mesh_show_back_face); utility::filesystem::ChangeWorkingDirectory(current_dir); }, "Function to draw a list of geometry::Geometry objects", "geometry_list"_a, "window_name"_a = "Open3D", "width"_a = 1920, "height"_a = 1080, "left"_a = 50, "top"_a = 50, - "point_show_normal"_a = false); + "point_show_normal"_a = false, "mesh_show_wireframe"_a = false, + "mesh_show_back_face"_a = false); docstring::FunctionDocInject(m, "draw_geometries", map_shared_argument_docstrings); diff --git a/src/Python/open3d_pybind/visualization/viewcontrol.cpp b/src/Python/open3d_pybind/visualization/viewcontrol.cpp index c327033825f..10ca40fe83e 100644 --- a/src/Python/open3d_pybind/visualization/viewcontrol.cpp +++ b/src/Python/open3d_pybind/visualization/viewcontrol.cpp @@ -105,7 +105,15 @@ void pybind_viewcontrol(py::module &m) { &visualization::ViewControl::UnsetConstantZFar, "Function to remove a previously set constant z far value, " "i.e., far z-plane of the visualizer is dynamically set " - "dependent on zoom and bounding box size."); + "dependent on zoom and bounding box size.") + .def("set_lookat", &visualization::ViewControl::SetLookat, + "Set the lookat vector of the visualizer", 
"lookat"_a) + .def("set_up", &visualization::ViewControl::SetUp, + "Set the up vector of the visualizer", "up"_a) + .def("set_front", &visualization::ViewControl::SetFront, + "Set the front vector of the visualizer", "front"_a) + .def("set_zoom", &visualization::ViewControl::SetZoom, + "Set the zoom of the visualizer", "zoom"_a); docstring::ClassMethodDocInject(m, "ViewControl", "change_field_of_view", map_view_control_docstrings); docstring::ClassMethodDocInject(m, "ViewControl", diff --git a/src/Python/requirements.txt b/src/Python/requirements.txt index ced8bf15a8b..546896456c6 100644 --- a/src/Python/requirements.txt +++ b/src/Python/requirements.txt @@ -2,3 +2,4 @@ ipywidgets widgetsnbextension notebook numpy +matplotlib diff --git a/src/UnitTest/Geometry/TriangleMesh.cpp b/src/UnitTest/Geometry/TriangleMesh.cpp index 7a885c4dcb8..c33ff87be41 100644 --- a/src/UnitTest/Geometry/TriangleMesh.cpp +++ b/src/UnitTest/Geometry/TriangleMesh.cpp @@ -2079,19 +2079,30 @@ TEST(TriangleMesh, CreateMeshArrow) { } TEST(TriangleMesh, CreateMeshCoordinateFrame) { - vector ref_vertices = { - {0.000000, 0.000000, 0.006000}, {0.000939, 0.000000, 0.005926}, - {0.000927, 0.000147, 0.005926}, {-0.000000, -0.002724, 0.005346}, - {0.000426, -0.002690, 0.005346}, {-0.000000, -0.003527, 0.004854}, - {-0.005346, 0.000000, 0.002724}, {-0.005280, -0.000836, 0.002724}, - {-0.005706, 0.000000, 0.001854}, {0.000000, 0.005926, -0.000939}, - {-0.000927, 0.005853, -0.000939}, {0.000000, 0.005706, -0.001854}, - {0.004243, 0.000000, -0.004243}, {0.004190, 0.000664, -0.004243}, - {0.003527, 0.000000, -0.004854}, {-0.000000, -0.001854, -0.005706}, - {0.000290, -0.001831, -0.005706}, {-0.000000, -0.000939, -0.005926}, - {0.000000, 0.080000, 0.003500}, {0.001082, 0.080000, 0.003329}, - {0.000000, 0.060000, 0.003500}, {-0.003500, 0.000000, 0.060000}, - {-0.003329, -0.001082, 0.060000}, {-0.003500, 0.000000, 0.040000}}; + vector ref_vertices = {{-0.00516755, -0.00516755, 0.000832451}, + {-0.00422894, 
-0.00516755, 0.000758582}, + {-0.0042405, -0.00502072, 0.000758582}, + {-0.00516755, -0.00789149, 0.000178491}, + {-0.00474143, -0.00785796, 0.000178491}, + {-0.00516755, -0.00869426, -0.000313447}, + {-0.0105136, -0.00516755, -0.00244361}, + {-0.0104478, -0.00600385, -0.00244361}, + {-0.0108739, -0.00516755, -0.00331345}, + {-0.00516755, 0.000758582, -0.00610616}, + {-0.0060946, 0.000685621, -0.00610616}, + {-0.00516755, 0.000538791, -0.00702165}, + {-0.000924908, -0.00516755, -0.00941019}, + {-0.000977142, -0.00450385, -0.00941019}, + {-0.00164084, -0.00516755, -0.0100217}, + {-0.00516755, -0.00702165, -0.0108739}, + {-0.0048775, -0.00699882, -0.0108739}, + {-0.00516755, -0.00610616, -0.0110937}, + {-0.00516755, 0.0748325, -0.00166755}, + {-0.00408599, 0.0748325, -0.00183885}, + {-0.00516755, 0.0548325, -0.00166755}, + {-0.00866755, -0.00516755, 0.0548325}, + {-0.00849625, -0.00624911, 0.0548325}, + {-0.00866755, -0.00516755, 0.0348325}}; vector ref_vertex_normals = { {0.000000, 0.000000, 1.000000}, {0.182067, -0.006090, 0.983267}, diff --git a/util/scripts/git_enable_ipynb_filter.sh b/util/scripts/git_enable_ipynb_filter.sh new file mode 100755 index 00000000000..066022a828f --- /dev/null +++ b/util/scripts/git_enable_ipynb_filter.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +# This has to be run only once to add the jupyter notebook filter to the local +# config. +git config --local include.path ../.gitfilters diff --git a/util/scripts/make-documentation.sh b/util/scripts/make-documentation.sh index d699dd47de0..c71fe306f56 100755 --- a/util/scripts/make-documentation.sh +++ b/util/scripts/make-documentation.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Documentation build scripts for CI # +# To build documentation locally, ignore the xvfb-run and arguments. 
+# # Prerequisites: # pip install sphinx sphinx-autobuild # sudo apt-get -y install doxygen @@ -9,5 +11,5 @@ set -e curr_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" pushd ${curr_dir}/../../docs -python make_docs.py --sphinx --doxyge +python make_docs.py --clean_notebooks --execute_notebooks=auto --sphinx --doxyge popd