From b145dfa4de9f803011a2a05b80617e45217364b1 Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Fri, 19 Nov 2021 13:43:43 -0500 Subject: [PATCH 01/15] Reimport of plotter3.py plotter3.py is an additional plotting module that behaves like plotter.py, but is meant to visualize targets and measurements in 3D rather than on 2D planes. Currently, only truth visualization is supported; a hypothetical usage sketch is included after the first license diff below. --- .eggs/README.txt | 6 + .../packaging-21.3-py3.8.egg/EGG-INFO/LICENSE | 3 + .../EGG-INFO/LICENSE.APACHE | 177 + .../EGG-INFO/LICENSE.BSD | 23 + .../EGG-INFO/PKG-INFO | 453 ++ .../packaging-21.3-py3.8.egg/EGG-INFO/RECORD | 19 + .eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL | 5 + .../EGG-INFO/requires.txt | 1 + .../EGG-INFO/top_level.txt | 1 + .../packaging/__about__.py | 26 + .../packaging/__init__.py | 25 + .../packaging/_manylinux.py | 301 + .../packaging/_musllinux.py | 136 + .../packaging/_structures.py | 61 + .../packaging/markers.py | 304 + .../packaging/py.typed | 0 .../packaging/requirements.py | 146 + .../packaging/specifiers.py | 802 +++ .../packaging/tags.py | 487 ++ .../packaging/utils.py | 136 + .../packaging/version.py | 504 ++ .../EGG-INFO/LICENSE | 18 + .../EGG-INFO/PKG-INFO | 109 + .../pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD | 17 + .../pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL | 5 + .../EGG-INFO/requires.txt | 4 + .../EGG-INFO/top_level.txt | 1 + .../pyparsing/__init__.py | 328 + .../pyparsing/actions.py | 207 + .../pyparsing/common.py | 424 ++ .../pyparsing/core.py | 5772 +++++++++++++++++ .../pyparsing/diagram/__init__.py | 593 ++ .../pyparsing/diagram/template.jinja2 | 26 + .../pyparsing/exceptions.py | 267 + .../pyparsing/helpers.py | 1059 +++ .../pyparsing/results.py | 758 +++ .../pyparsing/testing.py | 331 + .../pyparsing/unicode.py | 332 + .../pyparsing/util.py | 234 + .../EGG-INFO/LICENSE | 17 + .../EGG-INFO/PKG-INFO | 639 ++ .../EGG-INFO/RECORD | 23 + .../EGG-INFO/WHEEL | 5 + .../EGG-INFO/entry_points.txt | 37 + .../EGG-INFO/requires.txt | 6 + .../EGG-INFO/top_level.txt | 1 + .../EGG-INFO/zip-safe | 1 + .../setuptools_scm/__init__.py | 212 + .../setuptools_scm/__main__.py | 15 + .../setuptools_scm/_version_cls.py | 49 + .../setuptools_scm/config.py | 212 + .../setuptools_scm/discover.py | 58 + .../setuptools_scm/file_finder.py | 70 + .../setuptools_scm/file_finder_git.py | 93 + .../setuptools_scm/file_finder_hg.py | 49 + .../setuptools_scm/git.py | 220 + .../setuptools_scm/hacks.py | 40 + .../setuptools_scm/hg.py | 169 + .../setuptools_scm/hg_git.py | 133 + .../setuptools_scm/integration.py | 94 + .../setuptools_scm/scm_workdir.py | 15 + .../setuptools_scm/utils.py | 154 + .../setuptools_scm/version.py | 460 ++ .../EGG-INFO/LICENSE | 17 + .../EGG-INFO/PKG-INFO | 44 + .../EGG-INFO/RECORD | 7 + .../EGG-INFO/WHEEL | 6 + .../EGG-INFO/entry_points.txt | 6 + .../EGG-INFO/top_level.txt | 1 + .../setuptools_scm_git_archive/__init__.py | 21 + .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE | 21 + .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO | 208 + .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD | 9 + .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL | 4 + .eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py | 9 + .eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py | 663 ++ .eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py | 101 + .eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py | 6 + .eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed | 1 + stonesoup/plotter3.py | 327 + 80 files changed, 18324 insertions(+) create mode 100644 .eggs/README.txt create mode 100644 
.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE create mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE create mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD create mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO create mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD create mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL create mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt create mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/__about__.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/__init__.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/_structures.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/markers.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/py.typed create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/requirements.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/tags.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/utils.py create mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/version.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py create mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py create mode 100644 
.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py create mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py create mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE create mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO create mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD create mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL create mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt create mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt create mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py create mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed create mode 100644 stonesoup/plotter3.py diff --git a/.eggs/README.txt b/.eggs/README.txt new file mode 100644 index 000000000..5d0166882 --- /dev/null +++ b/.eggs/README.txt @@ -0,0 +1,6 @@ +This directory contains eggs that were downloaded by setuptools to build, test, and run plug-ins. + +This directory caches those eggs to prevent repeated downloads. + +However, it is safe to delete this directory. + diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE new file mode 100644 index 000000000..6f62d44e4 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE @@ -0,0 +1,3 @@ +This software is made available under the terms of *either* of the licenses +found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made +under the terms of *both* these licenses. 
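To make the intent of the new module concrete, here is a minimal, hypothetical sketch of truth-only 3D plotting in the style the commit message describes. It is not the plotter3.py implementation from this patch: the function name, the mapping argument, and the assumption that each ground-truth state exposes its positions through a column-vector state_vector are illustrative guesses only.

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the "3d" projection)

def plot_ground_truths_3d(truths, mapping=(0, 2, 4)):
    # Hypothetical sketch, not this patch's code: draw each ground-truth
    # path as a dashed line in 3D. ``mapping`` names the rows of each state
    # vector assumed to hold the x, y and z positions.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    for truth in truths:
        xs = [state.state_vector[mapping[0], 0] for state in truth]
        ys = [state.state_vector[mapping[1], 0] for state in truth]
        zs = [state.state_vector[mapping[2], 0] for state in truth]
        ax.plot(xs, ys, zs, linestyle="--")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    return ax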
diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE new file mode 100644 index 000000000..f433b1a53 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD new file mode 100644 index 000000000..42ce7b75c --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO new file mode 100644 index 000000000..358ace536 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO @@ -0,0 +1,453 @@ +Metadata-Version: 2.1 +Name: packaging +Version: 21.3 +Summary: Core utilities for Python packages +Home-page: https://github.com/pypa/packaging +Author: Donald Stufft and individual contributors +Author-email: donald@stufft.io +License: BSD-2-Clause or Apache-2.0 +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: LICENSE.APACHE +License-File: LICENSE.BSD +Requires-Dist: pyparsing (!=3.0.5,>=2.0.2) + +packaging +========= + +.. start-intro + +Reusable core utilities for various Python Packaging +`interoperability specifications `_. + +This library provides utilities that implement the interoperability +specifications which have clearly one correct behaviour (eg: :pep:`440`) +or benefit greatly from having a single shared implementation (eg: :pep:`425`). + +.. end-intro + +The ``packaging`` project includes the following: version handling, specifiers, +markers, requirements, tags, utilities. + +Documentation +------------- + +The `documentation`_ provides information and the API for the following: + +- Version Handling +- Specifiers +- Markers +- Requirements +- Tags +- Utilities + +Installation +------------ + +Use ``pip`` to install these utilities:: + + pip install packaging + +Discussion +---------- + +If you run into bugs, you can file them in our `issue tracker`_. + +You can also join ``#pypa`` on Freenode to ask questions or get involved. + + +.. _`documentation`: https://packaging.pypa.io/ +.. 
_`issue tracker`: https://github.com/pypa/packaging/issues + + +Code of Conduct +--------------- + +Everyone interacting in the packaging project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_. + +.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md + +Contributing +------------ + +The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as +well as how to report a potential security issue. The documentation for this +project also covers information about `project development`_ and `security`_. + +.. _`project development`: https://packaging.pypa.io/en/latest/development/ +.. _`security`: https://packaging.pypa.io/en/latest/security/ + +Project History +--------------- + +Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for +recent changes and project history. + +.. _`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/ + +Changelog +--------- + +21.3 - 2021-11-17 +~~~~~~~~~~~~~~~~~ + +* Add a ``pp3-none-any`` tag (`#311 `__) +* Replace the blank pyparsing 3 exclusion with a 3.0.5 exclusion (`#481 `__, `#486 `__) +* Fix a spelling mistake (`#479 `__) + +21.2 - 2021-10-29 +~~~~~~~~~~~~~~~~~ + +* Update documentation entry for 21.1. + +21.1 - 2021-10-29 +~~~~~~~~~~~~~~~~~ + +* Update pin to pyparsing to exclude 3.0.0. + +21.0 - 2021-07-03 +~~~~~~~~~~~~~~~~~ + +* PEP 656: musllinux support (`#411 `__) +* Drop support for Python 2.7, Python 3.4 and Python 3.5. +* Replace distutils usage with sysconfig (`#396 `__) +* Add support for zip files in ``parse_sdist_filename`` (`#429 `__) +* Use cached ``_hash`` attribute to short-circuit tag equality comparisons (`#417 `__) +* Specify the default value for the ``specifier`` argument to ``SpecifierSet`` (`#437 `__) +* Proper keyword-only "warn" argument in packaging.tags (`#403 `__) +* Correctly remove prerelease suffixes from ~= check (`#366 `__) +* Fix type hints for ``Version.post`` and ``Version.dev`` (`#393 `__) +* Use typing alias ``UnparsedVersion`` (`#398 `__) +* Improve type inference for ``packaging.specifiers.filter()`` (`#430 `__) +* Tighten the return type of ``canonicalize_version()`` (`#402 `__) + +20.9 - 2021-01-29 +~~~~~~~~~~~~~~~~~ + +* Run `isort `_ over the code base (`#377 `__) +* Add support for the ``macosx_10_*_universal2`` platform tags (`#379 `__) +* Introduce ``packaging.utils.parse_wheel_filename()`` and ``parse_sdist_filename()`` + (`#387 `__ and `#389 `__) + +20.8 - 2020-12-11 +~~~~~~~~~~~~~~~~~ + +* Revert back to setuptools for compatibility purposes for some Linux distros (`#363 `__) +* Do not insert an underscore in wheel tags when the interpreter version number + is more than 2 digits (`#372 `__) + +20.7 - 2020-11-28 +~~~~~~~~~~~~~~~~~ + +No unreleased changes. + +20.6 - 2020-11-28 +~~~~~~~~~~~~~~~~~ + +.. note:: This release was subsequently yanked, and these changes were included in 20.7. + +* Fix flit configuration, to include LICENSE files (`#357 `__) +* Make `intel` a recognized CPU architecture for the `universal` macOS platform tag (`#361 `__) +* Add some missing type hints to `packaging.requirements` (issue:`350`) + +20.5 - 2020-11-27 +~~~~~~~~~~~~~~~~~ + +* Officially support Python 3.9 (`#343 `__) +* Deprecate the ``LegacyVersion`` and ``LegacySpecifier`` classes (`#321 `__) +* Handle ``OSError`` on non-dynamic executables when attempting to resolve + the glibc version string. 
+ +20.4 - 2020-05-19 +~~~~~~~~~~~~~~~~~ + +* Canonicalize version before comparing specifiers. (`#282 `__) +* Change type hint for ``canonicalize_name`` to return + ``packaging.utils.NormalizedName``. + This enables the use of static typing tools (like mypy) to detect mixing of + normalized and un-normalized names. + +20.3 - 2020-03-05 +~~~~~~~~~~~~~~~~~ + +* Fix changelog for 20.2. + +20.2 - 2020-03-05 +~~~~~~~~~~~~~~~~~ + +* Fix a bug that caused a 32-bit OS that runs on a 64-bit ARM CPU (e.g. ARM-v8, + aarch64), to report the wrong bitness. + +20.1 - 2020-01-24 +~~~~~~~~~~~~~~~~~~~ + +* Fix a bug caused by reuse of an exhausted iterator. (`#257 `__) + +20.0 - 2020-01-06 +~~~~~~~~~~~~~~~~~ + +* Add type hints (`#191 `__) + +* Add proper trove classifiers for PyPy support (`#198 `__) + +* Scale back depending on ``ctypes`` for manylinux support detection (`#171 `__) + +* Use ``sys.implementation.name`` where appropriate for ``packaging.tags`` (`#193 `__) + +* Expand upon the API provided by ``packaging.tags``: ``interpreter_name()``, ``mac_platforms()``, ``compatible_tags()``, ``cpython_tags()``, ``generic_tags()`` (`#187 `__) + +* Officially support Python 3.8 (`#232 `__) + +* Add ``major``, ``minor``, and ``micro`` aliases to ``packaging.version.Version`` (`#226 `__) + +* Properly mark ``packaging`` as being fully typed by adding a `py.typed` file (`#226 `__) + +19.2 - 2019-09-18 +~~~~~~~~~~~~~~~~~ + +* Remove dependency on ``attrs`` (`#178 `__, `#179 `__) + +* Use appropriate fallbacks for CPython ABI tag (`#181 `__, `#185 `__) + +* Add manylinux2014 support (`#186 `__) + +* Improve ABI detection (`#181 `__) + +* Properly handle debug wheels for Python 3.8 (`#172 `__) + +* Improve detection of debug builds on Windows (`#194 `__) + +19.1 - 2019-07-30 +~~~~~~~~~~~~~~~~~ + +* Add the ``packaging.tags`` module. (`#156 `__) + +* Correctly handle two-digit versions in ``python_version`` (`#119 `__) + + +19.0 - 2019-01-20 +~~~~~~~~~~~~~~~~~ + +* Fix string representation of PEP 508 direct URL requirements with markers. + +* Better handling of file URLs + + This allows for using ``file:///absolute/path``, which was previously + prevented due to the missing ``netloc``. + + This allows for all file URLs that ``urlunparse`` turns back into the + original URL to be valid. + + +18.0 - 2018-09-26 +~~~~~~~~~~~~~~~~~ + +* Improve error messages when invalid requirements are given. (`#129 `__) + + +17.1 - 2017-02-28 +~~~~~~~~~~~~~~~~~ + +* Fix ``utils.canonicalize_version`` when supplying non PEP 440 versions. + + +17.0 - 2017-02-28 +~~~~~~~~~~~~~~~~~ + +* Drop support for python 2.6, 3.2, and 3.3. + +* Define minimal pyparsing version to 2.0.2 (`#91 `__). + +* Add ``epoch``, ``release``, ``pre``, ``dev``, and ``post`` attributes to + ``Version`` and ``LegacyVersion`` (`#34 `__). + +* Add ``Version().is_devrelease`` and ``LegacyVersion().is_devrelease`` to + make it easy to determine if a release is a development release. + +* Add ``utils.canonicalize_version`` to canonicalize version strings or + ``Version`` instances (`#121 `__). + + +16.8 - 2016-10-29 +~~~~~~~~~~~~~~~~~ + +* Fix markers that utilize ``in`` so that they render correctly. + +* Fix an erroneous test on Python RC releases. + + +16.7 - 2016-04-23 +~~~~~~~~~~~~~~~~~ + +* Add support for the deprecated ``python_implementation`` marker which was + an undocumented setuptools marker in addition to the newer markers. 
+ + +16.6 - 2016-03-29 +~~~~~~~~~~~~~~~~~ + +* Add support for the deprecated, PEP 345 environment markers in addition to + the newer markers. + + +16.5 - 2016-02-26 +~~~~~~~~~~~~~~~~~ + +* Fix a regression in parsing requirements with whitespaces between the comma + separators. + + +16.4 - 2016-02-22 +~~~~~~~~~~~~~~~~~ + +* Fix a regression in parsing requirements like ``foo (==4)``. + + +16.3 - 2016-02-21 +~~~~~~~~~~~~~~~~~ + +* Fix a bug where ``packaging.requirements:Requirement`` was overly strict when + matching legacy requirements. + + +16.2 - 2016-02-09 +~~~~~~~~~~~~~~~~~ + +* Add a function that implements the name canonicalization from PEP 503. + + +16.1 - 2016-02-07 +~~~~~~~~~~~~~~~~~ + +* Implement requirement specifiers from PEP 508. + + +16.0 - 2016-01-19 +~~~~~~~~~~~~~~~~~ + +* Relicense so that packaging is available under *either* the Apache License, + Version 2.0 or a 2 Clause BSD license. + +* Support installation of packaging when only distutils is available. + +* Fix ``==`` comparison when there is a prefix and a local version in play. + (`#41 `__). + +* Implement environment markers from PEP 508. + + +15.3 - 2015-08-01 +~~~~~~~~~~~~~~~~~ + +* Normalize post-release spellings for rev/r prefixes. `#35 `__ + + +15.2 - 2015-05-13 +~~~~~~~~~~~~~~~~~ + +* Fix an error where the arbitrary specifier (``===``) was not correctly + allowing pre-releases when it was being used. + +* Expose the specifier and version parts through properties on the + ``Specifier`` classes. + +* Allow iterating over the ``SpecifierSet`` to get access to all of the + ``Specifier`` instances. + +* Allow testing if a version is contained within a specifier via the ``in`` + operator. + + +15.1 - 2015-04-13 +~~~~~~~~~~~~~~~~~ + +* Fix a logic error that was causing inconsistent answers about whether or not + a pre-release was contained within a ``SpecifierSet`` or not. + + +15.0 - 2015-01-02 +~~~~~~~~~~~~~~~~~ + +* Add ``Version().is_postrelease`` and ``LegacyVersion().is_postrelease`` to + make it easy to determine if a release is a post release. + +* Add ``Version().base_version`` and ``LegacyVersion().base_version`` to make + it easy to get the public version without any pre or post release markers. + +* Support the update to PEP 440 which removed the implied ``!=V.*`` when using + either the ``>V`` or ``<V`` operator. + + +14.3 - 2014-11-19 +~~~~~~~~~~~~~~~~~ + +* **BACKWARDS INCOMPATIBLE** Refactor specifier support so that it can sanely + handle legacy specifiers as well as PEP 440 specifiers. + +* **BACKWARDS INCOMPATIBLE** Move the specifier support out of + ``packaging.version`` into ``packaging.specifiers``. + + +14.2 - 2014-09-10 +~~~~~~~~~~~~~~~~~ + +* Add prerelease support to ``Specifier``. +* Remove the ability to do ``item in Specifier()`` and replace it with + ``Specifier().contains(item)`` in order to allow flags that signal if a + prerelease should be accepted or not. +* Add a method ``Specifier().filter()`` which will take an iterable and return + an iterable with items that do not match the specifier filtered out. + + +14.1 - 2014-09-08 +~~~~~~~~~~~~~~~~~ + +* Allow ``LegacyVersion`` and ``Version`` to be sorted together. +* Add ``packaging.version.parse()`` to enable easily parsing a version string + as either a ``Version`` or a ``LegacyVersion`` depending on its PEP 440 + validity. + + +14.0 - 2014-09-05 +~~~~~~~~~~~~~~~~~ + +* Initial release. + + +.. 
_`master`: https://github.com/pypa/packaging/ + + diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD new file mode 100644 index 000000000..870a8eb17 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD @@ -0,0 +1,19 @@ +packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661 +packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 +packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 +packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378 +packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +packaging/markers.py,sha256=Fygi3_eZnjQ-3VJizW5AhI5wvo0Hb6RMk4DidsKpOC0,8475 +packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +packaging/requirements.py,sha256=rjaGRCMepZS1mlYMjJ5Qh6rfq3gtsCRQUQmftGZ_bu8,4664 +packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110 +packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699 +packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 +packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 +packaging-21.3.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +packaging-21.3.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +packaging-21.3.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +packaging-21.3.dist-info/METADATA,sha256=KuKIy6qDLP3svIt6ejCbxBDhvq11ebkgUN55MeyKFyc,15147 +packaging-21.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +packaging-21.3.dist-info/top_level.txt,sha256=zFdHrhWnPslzsiP455HutQsqPB6v0KCtNUMtUtrefDw,10 +packaging-21.3.dist-info/RECORD,, diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL new file mode 100644 index 000000000..5bad85fdc --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt new file mode 100644 index 000000000..f6e4a46ef --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt @@ -0,0 +1 @@ +pyparsing!=3.0.5,>=2.0.2 diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt new file mode 100644 index 000000000..748809f75 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt @@ -0,0 +1 @@ +packaging diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/__about__.py b/.eggs/packaging-21.3-py3.8.egg/packaging/__about__.py new file mode 100644 index 000000000..3551bc2d2 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/__about__.py @@ -0,0 +1,26 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "21.3" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014-2019 %s" % __author__ diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/__init__.py b/.eggs/packaging-21.3-py3.8.egg/packaging/__init__.py new file mode 100644 index 000000000..3c50c5dcf --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/__init__.py @@ -0,0 +1,25 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from .__about__ import ( + __author__, + __copyright__, + __email__, + __license__, + __summary__, + __title__, + __uri__, + __version__, +) + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py b/.eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py new file mode 100644 index 000000000..4c379aa6f --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py @@ -0,0 +1,301 @@ +import collections +import functools +import os +import re +import struct +import sys +import warnings +from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple + + +# Python does not provide platform information at sufficient granularity to +# identify the architecture of the running executable in some cases, so we +# determine it dynamically by reading the information from the running +# process. This only applies on Linux, which uses the ELF format. +class _ELFFileHeader: + # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header + class _InvalidELFFileHeader(ValueError): + """ + An invalid ELF file header was found. + """ + + ELF_MAGIC_NUMBER = 0x7F454C46 + ELFCLASS32 = 1 + ELFCLASS64 = 2 + ELFDATA2LSB = 1 + ELFDATA2MSB = 2 + EM_386 = 3 + EM_S390 = 22 + EM_ARM = 40 + EM_X86_64 = 62 + EF_ARM_ABIMASK = 0xFF000000 + EF_ARM_ABI_VER5 = 0x05000000 + EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + def __init__(self, file: IO[bytes]) -> None: + def unpack(fmt: str) -> int: + try: + data = file.read(struct.calcsize(fmt)) + result: Tuple[int, ...] 
= struct.unpack(fmt, data) + except struct.error: + raise _ELFFileHeader._InvalidELFFileHeader() + return result[0] + + self.e_ident_magic = unpack(">I") + if self.e_ident_magic != self.ELF_MAGIC_NUMBER: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_class = unpack("B") + if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_data = unpack("B") + if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_version = unpack("B") + self.e_ident_osabi = unpack("B") + self.e_ident_abiversion = unpack("B") + self.e_ident_pad = file.read(7) + format_h = "H" + format_i = "I" + format_q = "Q" + format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q + self.e_type = unpack(format_h) + self.e_machine = unpack(format_h) + self.e_version = unpack(format_i) + self.e_entry = unpack(format_p) + self.e_phoff = unpack(format_p) + self.e_shoff = unpack(format_p) + self.e_flags = unpack(format_i) + self.e_ehsize = unpack(format_h) + self.e_phentsize = unpack(format_h) + self.e_phnum = unpack(format_h) + self.e_shentsize = unpack(format_h) + self.e_shnum = unpack(format_h) + self.e_shstrndx = unpack(format_h) + + +def _get_elf_header() -> Optional[_ELFFileHeader]: + try: + with open(sys.executable, "rb") as f: + elf_header = _ELFFileHeader(f) + except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): + return None + return elf_header + + +def _is_linux_armhf() -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_ARM + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABIMASK + ) == elf_header.EF_ARM_ABI_VER5 + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD + ) == elf_header.EF_ARM_ABI_FLOAT_HARD + return result + + +def _is_linux_i686() -> bool: + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_386 + return result + + +def _have_compatible_abi(arch: str) -> bool: + if arch == "armv7l": + return _is_linux_armhf() + if arch == "i686": + return _is_linux_i686() + return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> Optional[str]: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. 
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". + version_string = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.split() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> Optional[str]: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> Optional[str]: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> Tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. + """ + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn( + "Expected glibc version with 2 components major.minor," + " got: %s" % version_str, + RuntimeWarning, + ) + return -1, -1 + return int(m.group("major")), int(m.group("minor")) + + +@functools.lru_cache() +def _get_glibc_version() -> Tuple[int, int]: + version_str = _glibc_version_string() + if version_str is None: + return (-1, -1) + return _parse_glibc_version(version_str) + + +# From PEP 513, PEP 600 +def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: + sys_glibc = _get_glibc_version() + if sys_glibc < version: + return False + # Check for presence of _manylinux module. 
+ try: + import _manylinux # noqa + except ImportError: + return True + if hasattr(_manylinux, "manylinux_compatible"): + result = _manylinux.manylinux_compatible(version[0], version[1], arch) + if result is not None: + return bool(result) + return True + if version == _GLibCVersion(2, 5): + if hasattr(_manylinux, "manylinux1_compatible"): + return bool(_manylinux.manylinux1_compatible) + if version == _GLibCVersion(2, 12): + if hasattr(_manylinux, "manylinux2010_compatible"): + return bool(_manylinux.manylinux2010_compatible) + if version == _GLibCVersion(2, 17): + if hasattr(_manylinux, "manylinux2014_compatible"): + return bool(_manylinux.manylinux2014_compatible) + return True + + +_LEGACY_MANYLINUX_MAP = { + # CentOS 7 w/ glibc 2.17 (PEP 599) + (2, 17): "manylinux2014", + # CentOS 6 w/ glibc 2.12 (PEP 571) + (2, 12): "manylinux2010", + # CentOS 5 w/ glibc 2.5 (PEP 513) + (2, 5): "manylinux1", +} + + +def platform_tags(linux: str, arch: str) -> Iterator[str]: + if not _have_compatible_abi(arch): + return + # Oldest glibc to be supported regardless of architecture is (2, 17). + too_old_glibc2 = _GLibCVersion(2, 16) + if arch in {"x86_64", "i686"}: + # On x86/i686 also oldest glibc to be supported is (2, 5). + too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. + # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(tag, arch, glibc_version): + yield linux.replace("linux", tag) + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. + if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(legacy_tag, arch, glibc_version): + yield linux.replace("linux", legacy_tag) diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py b/.eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py new file mode 100644 index 000000000..8ac3059ba --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py @@ -0,0 +1,136 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. +""" + +import contextlib +import functools +import operator +import os +import re +import struct +import subprocess +import sys +from typing import IO, Iterator, NamedTuple, Optional, Tuple + + +def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: + return struct.unpack(fmt, f.read(struct.calcsize(fmt))) + + +def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: + """Detect musl libc location by parsing the Python executable. 
+ + Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca + ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html + """ + f.seek(0) + try: + ident = _read_unpacked(f, "16B") + except struct.error: + return None + if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. + return None + f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. + + try: + # e_fmt: Format for program header. + # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. + e_fmt, p_fmt, p_idx = { + 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. + 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. + }[ident[4]] + except KeyError: + return None + else: + p_get = operator.itemgetter(*p_idx) + + # Find the interpreter section and return its content. + try: + _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) + except struct.error: + return None + for i in range(e_phnum + 1): + f.seek(e_phoff + e_phentsize * i) + try: + p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) + except struct.error: + return None + if p_type != 3: # Not PT_INTERP. + continue + f.seek(p_offset) + interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") + if "musl" not in interpreter: + return None + return interpreter + return None + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> Optional[_MuslVersion]: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache() +def _get_musl_version(executable: str) -> Optional[_MuslVersion]: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + with contextlib.ExitStack() as stack: + try: + f = stack.enter_context(open(executable, "rb")) + except OSError: + return None + ld = _parse_ld_musl_from_elf(f) + if not ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(arch: str) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param arch: Should be the part of platform tag after the ``linux_`` + prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a + prerequisite for the current platform to be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. 
+ return + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/_structures.py b/.eggs/packaging-21.3-py3.8.egg/packaging/_structures.py new file mode 100644 index 000000000..90a6465f9 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/markers.py b/.eggs/packaging-21.3-py3.8.egg/packaging/markers.py new file mode 100644 index 000000000..cb640e8f9 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/markers.py @@ -0,0 +1,304 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import operator +import os +import platform +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from pyparsing import ( # noqa: N817 + Forward, + Group, + Literal as L, + ParseException, + ParseResults, + QuotedString, + ZeroOrMore, + stringEnd, + stringStart, +) + +from .specifiers import InvalidSpecifier, Specifier + +__all__ = [ + "InvalidMarker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "Marker", + "default_environment", +] + +Operator = Callable[[str, str], bool] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. 
+ """ + + +class Node: + def __init__(self, value: Any) -> None: + self.value = value + + def __str__(self) -> str: + return str(self.value) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +VARIABLE = ( + L("implementation_version") + | L("platform_python_implementation") + | L("implementation_name") + | L("python_full_version") + | L("platform_release") + | L("platform_version") + | L("platform_machine") + | L("platform_system") + | L("python_version") + | L("sys_platform") + | L("os_name") + | L("os.name") # PEP-345 + | L("sys.platform") # PEP-345 + | L("platform.version") # PEP-345 + | L("platform.machine") # PEP-345 + | L("platform.python_implementation") # PEP-345 + | L("python_implementation") # undocumented setuptools legacy + | L("extra") # PEP-508 +) +ALIASES = { + "os.name": "os_name", + "sys.platform": "sys_platform", + "platform.version": "platform_version", + "platform.machine": "platform_machine", + "platform.python_implementation": "platform_python_implementation", + "python_implementation": "platform_python_implementation", +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker( + marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True +) -> str: + + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
+ if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: Dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs) + + oper: Optional[Operator] = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +class Undefined: + pass + + +_undefined = Undefined() + + +def _get_env(environment: Dict[str, str], name: str) -> str: + value: Union[str, Undefined] = environment.get(name, _undefined) + + if isinstance(value, Undefined): + raise UndefinedEnvironmentName( + f"{name!r} does not exist in evaluation environment." + ) + + return value + + +def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + lhs_value = _get_env(environment, lhs.value) + rhs_value = rhs.value + else: + lhs_value = lhs.value + rhs_value = _get_env(environment, rhs.value) + + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: "sys._version_info") -> str: + version = "{0.major}.{0.minor}.{0.micro}".format(info) + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + try: + self._markers = _coerce_parse_result(MARKER.parseString(marker)) + except ParseException as e: + raise InvalidMarker( + f"Invalid marker: {marker!r}, parse error at " + f"{marker[e.loc : e.loc + 8]!r}" + ) + + def __str__(self) -> str: + return _format_marker(self._markers) + + def __repr__(self) -> str: + return f"" + + def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. 
environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/py.typed b/.eggs/packaging-21.3-py3.8.egg/packaging/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/requirements.py b/.eggs/packaging-21.3-py3.8.egg/packaging/requirements.py new file mode 100644 index 000000000..53f9a3aa4 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/requirements.py @@ -0,0 +1,146 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +import string +import urllib.parse +from typing import List, Optional as TOptional, Set + +from pyparsing import ( # noqa + Combine, + Literal as L, + Optional, + ParseException, + Regex, + Word, + ZeroOrMore, + originalTextFor, + stringEnd, + stringStart, +) + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r"[^ ]+")("url") +URL = AT + URI + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine( + VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False +)("_raw_spec") +_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start : t._original_end]) +) +MARKER_SEPARATOR = SEMICOLON +MARKER = MARKER_SEPARATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd +# pyparsing isn't thread safe during initialization, so we do it eagerly, see +# issue #104 +REQUIREMENT.parseString("x[]") + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. 
+ """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? + + def __init__(self, requirement_string: str) -> None: + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' + ) + + self.name: str = req.name + if req.url: + parsed_url = urllib.parse.urlparse(req.url) + if parsed_url.scheme == "file": + if urllib.parse.urlunparse(parsed_url) != req.url: + raise InvalidRequirement("Invalid URL given") + elif not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc + ): + raise InvalidRequirement(f"Invalid URL: {req.url}") + self.url: TOptional[str] = req.url + else: + self.url = None + self.extras: Set[str] = set(req.extras.asList() if req.extras else []) + self.specifier: SpecifierSet = SpecifierSet(req.specifier) + self.marker: TOptional[Marker] = req.marker if req.marker else None + + def __str__(self) -> str: + parts: List[str] = [self.name] + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + parts.append(f"[{formatted_extras}]") + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append(f"@ {self.url}") + if self.marker: + parts.append(" ") + + if self.marker: + parts.append(f"; {self.marker}") + + return "".join(parts) + + def __repr__(self) -> str: + return f"" diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py b/.eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py new file mode 100644 index 000000000..0e218a6f9 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py @@ -0,0 +1,802 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc +import functools +import itertools +import re +import warnings +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Set, + Tuple, + TypeVar, + Union, +) + +from .utils import canonicalize_version +from .version import LegacyVersion, Version, parse + +ParsedVersion = Union[Version, LegacyVersion] +UnparsedVersion = Union[Version, LegacyVersion, str] +VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) +CallableOperator = Callable[[ParsedVersion, str], bool] + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. + """ + + @abc.abstractproperty + def prereleases(self) -> Optional[bool]: + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. 
+ """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators: Dict[str, str] = {} + _regex: Pattern[str] + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self) -> str: + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + return self._spec[0], canonicalize_version(self._spec[1]) + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self) -> str: + return self._spec[0] + + @property + def version(self) -> str: + return self._spec[1] + + @property + def prereleases(self) -> Optional[bool]: + return self._prereleases + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: str) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + normalized_item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. 
+ operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex_str = r""" + (?P(==|!=|<=|>=|<|>)) + \s* + (?P + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. + ) + """ + + _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + super().__init__(spec, prereleases) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release", + DeprecationWarning, + ) + + def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal( + self, prospective: LegacyVersion, spec: str + ) -> bool: + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective > self._coerce_version(spec) + + +def _require_version_compare( + fn: Callable[["Specifier", ParsedVersion, str], bool] +) -> Callable[["Specifier", ParsedVersion, str], bool]: + @functools.wraps(fn) + def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: + if not isinstance(prospective, Version): + return False + return 
fn(self, prospective, spec) + + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: + + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = ".".join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + @_require_version_compare + def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: + + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. 
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + split_prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = split_prospective[: len(split_spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + padded_spec, padded_prospective = _pad_version( + split_spec, shortened_prospective + ) + + return padded_prospective == padded_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + @_require_version_compare + def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal( + self, prospective: ParsedVersion, spec: str + ) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. 
+ if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self) -> bool: + + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> List[str]: + result: List[str] = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) + + +class SpecifierSet(BaseSpecifier): + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: + + # Split on , to break each individual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. 
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed: Set[_IndividualSpecifier] = set() + for specifier in split_specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + def __repr__(self) -> str: + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"" + + def __str__(self) -> str: + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self) -> int: + return hash(self._specs) + + def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + if isinstance(other, str): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." + ) + + return specifier + + def __eq__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __len__(self) -> int: + return len(self._specs) + + def __iter__(self) -> Iterator[_IndividualSpecifier]: + return iter(self._specs) + + @property + def prereleases(self) -> Optional[bool]: + + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: UnparsedVersion) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. 
If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. + else: + filtered: List[VersionTypeVar] = [] + found_prereleases: List[VersionTypeVar] = [] + + item: UnparsedVersion + parsed_version: Union[Version, LegacyVersion] + + for item in iterable: + # Ensure that we some kind of Version class for this item. + if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/tags.py b/.eggs/packaging-21.3-py3.8.egg/packaging/tags.py new file mode 100644 index 000000000..9a3d25a71 --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/tags.py @@ -0,0 +1,487 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import logging +import platform +import sys +import sysconfig +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Dict, + FrozenSet, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from . import _manylinux, _musllinux + +logger = logging.getLogger(__name__) + +PythonVersion = Sequence[int] +MacVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: Dict[str, str] = { + "python": "py", # Generic. 
+ "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 + + +class Tag: + """ + A representation of the tag triple for a wheel. + + Instances are considered immutable and thus are hashable. Equality checking + is also supported. + """ + + __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] + + def __init__(self, interpreter: str, abi: str, platform: str) -> None: + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + # The __hash__ of every single element in a Set[Tag] will be evaluated each time + # that a set calls its `.disjoint()` method, which may be called hundreds of + # times when scanning a page of links for packages with tags matching that + # Set[Tag]. Pre-computing the value here produces significant speedups for + # downstream consumers. + self._hash = hash((self._interpreter, self._abi, self._platform)) + + @property + def interpreter(self) -> str: + return self._interpreter + + @property + def abi(self) -> str: + return self._abi + + @property + def platform(self) -> str: + return self._platform + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Tag): + return NotImplemented + + return ( + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) + ) + + def __hash__(self) -> int: + return self._hash + + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" + + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" + + +def parse_tag(tag: str) -> FrozenSet[Tag]: + """ + Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. + + Returning a set is required due to the possibility that the tag is a + compressed tag set. + """ + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: + value = sysconfig.get_config_var(name) + if value is None and warn: + logger.debug( + "Config variable '%s' is unset, Python ABI tag may be incorrect", name + ) + return value + + +def _normalize_string(string: str) -> str: + return string.replace(".", "_").replace("-", "_") + + +def _abi3_applies(python_version: PythonVersion) -> bool: + """ + Determine if the Python version supports abi3. + + PEP 384 was first implemented in Python 3.2. + """ + return len(python_version) > 1 and tuple(python_version) >= (3, 2) + + +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: + py_version = tuple(py_version) # To allow for version comparison. + abis = [] + version = _version_nodot(py_version[:2]) + debug = pymalloc = ucs4 = "" + with_debug = _get_config_var("Py_DEBUG", warn) + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. 
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 + has_ext = "_d.pyd" in EXTENSION_SUFFIXES + if with_debug or (with_debug is None and (has_refcount or has_ext)): + debug = "d" + if py_version < (3, 8): + with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) + if with_pymalloc or with_pymalloc is None: + pymalloc = "m" + if py_version < (3, 3): + unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) + if unicode_size == 4 or ( + unicode_size is None and sys.maxunicode == 0x10FFFF + ): + ucs4 = "u" + elif debug: + # Debug builds can also load "normal" extension modules. + # We can also assume no UCS-4 or pymalloc requirement. + abis.append(f"cp{version}") + abis.insert( + 0, + "cp{version}{debug}{pymalloc}{ucs4}".format( + version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 + ), + ) + return abis + + +def cpython_tags( + python_version: Optional[PythonVersion] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a CPython interpreter. + + The tags consist of: + - cp-- + - cp-abi3- + - cp-none- + - cp-abi3- # Older Python versions down to 3.2. + + If python_version only specifies a major version then user-provided ABIs and + the 'none' ABItag will be used. + + If 'abi3' or 'none' are specified in 'abis' then they will be yielded at + their normal position and not at the beginning. + """ + if not python_version: + python_version = sys.version_info[:2] + + interpreter = f"cp{_version_nodot(python_version[:2])}" + + if abis is None: + if len(python_version) > 1: + abis = _cpython_abis(python_version, warn) + else: + abis = [] + abis = list(abis) + # 'abi3' and 'none' are explicitly handled later. + for explicit_abi in ("abi3", "none"): + try: + abis.remove(explicit_abi) + except ValueError: + pass + + platforms = list(platforms or platform_tags()) + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + if _abi3_applies(python_version): + yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) + yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) + + if _abi3_applies(python_version): + for minor_version in range(python_version[1] - 1, 1, -1): + for platform_ in platforms: + interpreter = "cp{version}".format( + version=_version_nodot((python_version[0], minor_version)) + ) + yield Tag(interpreter, "abi3", platform_) + + +def _generic_abi() -> Iterator[str]: + abi = sysconfig.get_config_var("SOABI") + if abi: + yield _normalize_string(abi) + + +def generic_tags( + interpreter: Optional[str] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a generic interpreter. + + The tags consist of: + - -- + + The "none" ABI will be added if it was not explicitly provided. + """ + if not interpreter: + interp_name = interpreter_name() + interp_version = interpreter_version(warn=warn) + interpreter = "".join([interp_name, interp_version]) + if abis is None: + abis = _generic_abi() + platforms = list(platforms or platform_tags()) + abis = list(abis) + if "none" not in abis: + abis.append("none") + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + + +def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: + """ + Yields Python versions in descending order. 
+ + After the latest version, the major-only version will be yielded, and then + all previous versions of that major version. + """ + if len(py_version) > 1: + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" + if len(py_version) > 1: + for minor in range(py_version[1] - 1, -1, -1): + yield f"py{_version_nodot((py_version[0], minor))}" + + +def compatible_tags( + python_version: Optional[PythonVersion] = None, + interpreter: Optional[str] = None, + platforms: Optional[Iterable[str]] = None, +) -> Iterator[Tag]: + """ + Yields the sequence of tags that are compatible with a specific version of Python. + + The tags consist of: + - py*-none- + - -none-any # ... if `interpreter` is provided. + - py*-none-any + """ + if not python_version: + python_version = sys.version_info[:2] + platforms = list(platforms or platform_tags()) + for version in _py_interpreter_range(python_version): + for platform_ in platforms: + yield Tag(version, "none", platform_) + if interpreter: + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(python_version): + yield Tag(version, "none", "any") + + +def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: + if not is_32bit: + return arch + + if arch.startswith("ppc"): + return "ppc" + + return "i386" + + +def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: + formats = [cpu_arch] + if cpu_arch == "x86_64": + if version < (10, 4): + return [] + formats.extend(["intel", "fat64", "fat32"]) + + elif cpu_arch == "i386": + if version < (10, 4): + return [] + formats.extend(["intel", "fat32", "fat"]) + + elif cpu_arch == "ppc64": + # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? + if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + if cpu_arch in {"arm64", "x86_64"}: + formats.append("universal2") + + if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: + formats.append("universal") + + return formats + + +def mac_platforms( + version: Optional[MacVersion] = None, arch: Optional[str] = None +) -> Iterator[str]: + """ + Yields the platform tags for a macOS system. + + The `version` parameter is a two-item tuple specifying the macOS version to + generate platform tags for. The `arch` parameter is the CPU architecture to + generate platform tags for. Both parameters default to the appropriate value + for the current system. + """ + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version + if arch is None: + arch = _mac_arch(cpu_arch) + else: + arch = arch + + if (10, 0) <= version and version < (11, 0): + # Prior to Mac OS 11, each yearly release of Mac OS bumped the + # "minor" version number. The major version was always 10. + for minor_version in range(version[1], -1, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=10, minor=minor_version, binary_format=binary_format + ) + + if version >= (11, 0): + # Starting with Mac OS 11, each yearly release bumps the major version + # number. The minor versions are now the midyear updates. 
+ for major_version in range(version[0], 10, -1): + compat_version = major_version, 0 + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=major_version, minor=0, binary_format=binary_format + ) + + if version >= (11, 0): + # Mac OS 11 on x86_64 is compatible with binaries from previous releases. + # Arm64 support was introduced in 11.0, so no Arm binaries from previous + # releases exist. + # + # However, the "universal2" binary format can have a + # macOS version earlier than 11.0 when the x86_64 part of the binary supports + # that version of macOS. + if arch == "x86_64": + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + else: + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_format = "universal2" + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + + +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv7l" + _, arch = linux.split("_", 1) + yield from _manylinux.platform_tags(linux, arch) + yield from _musllinux.platform_tags(arch) + yield linux + + +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) + + +def platform_tags() -> Iterator[str]: + """ + Provides the platform tags for this installation. + """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() + + +def interpreter_name() -> str: + """ + Returns the name of the running interpreter. + """ + name = sys.implementation.name + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def interpreter_version(*, warn: bool = False) -> str: + """ + Returns the version of the running interpreter. + """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + if interp_name == "pp": + yield from compatible_tags(interpreter="pp3") + else: + yield from compatible_tags() diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/utils.py b/.eggs/packaging-21.3-py3.8.egg/packaging/utils.py new file mode 100644 index 000000000..bab11b80c --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/utils.py @@ -0,0 +1,136 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository +# for complete details. + +import re +from typing import FrozenSet, NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +_canonicalize_regex = re.compile(r"[-_.]+") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str) -> NormalizedName: + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def canonicalize_version(version: Union[Version, str]) -> str: + """ + This is very similar to Version.__str__, but has one subtle difference + with the way it handles the release segment. + """ + if isinstance(version, str): + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + else: + parsed = version + + parts = [] + + # Epoch + if parsed.epoch != 0: + parts.append(f"{parsed.epoch}!") + + # Release segment + # NB: This strips trailing '.0's to normalize + parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) + + # Pre-release + if parsed.pre is not None: + parts.append("".join(str(x) for x in parsed.pre)) + + # Post-release + if parsed.post is not None: + parts.append(f".post{parsed.post}") + + # Development release + if parsed.dev is not None: + parts.append(f".dev{parsed.dev}") + + # Local version segment + if parsed.local is not None: + parts.append(f"+{parsed.local}") + + return "".join(parts) + + +def parse_wheel_filename( + filename: str, +) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name + if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename}") + name = canonicalize_name(name_part) + version = Version(parts[1]) + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in '{filename}'" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. 
+ name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") + + name = canonicalize_name(name_part) + version = Version(version_part) + return (name, version) diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/version.py b/.eggs/packaging-21.3-py3.8.egg/packaging/version.py new file mode 100644 index 000000000..de9a09a4e --- /dev/null +++ b/.eggs/packaging-21.3-py3.8.egg/packaging/version.py @@ -0,0 +1,504 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import collections +import itertools +import re +import warnings +from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + Tuple[ + Union[ + SubLocalType, + Tuple[SubLocalType, str], + Tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = Tuple[ + int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = Tuple[int, Tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version: str) -> Union["LegacyVersion", "Version"]: + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion: + _key: Union[CmpKey, LegacyCmpKey] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. 
+ def __lt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +class LegacyVersion(_BaseVersion): + def __init__(self, version: str) -> None: + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release", + DeprecationWarning, + ) + + def __str__(self) -> str: + return self._version + + def __repr__(self) -> str: + return f"" + + @property + def public(self) -> str: + return self._version + + @property + def base_version(self) -> str: + return self._version + + @property + def epoch(self) -> int: + return -1 + + @property + def release(self) -> None: + return None + + @property + def pre(self) -> None: + return None + + @property + def post(self) -> None: + return None + + @property + def dev(self) -> None: + return None + + @property + def local(self) -> None: + return None + + @property + def is_prerelease(self) -> bool: + return False + + @property + def is_postrelease(self) -> bool: + return False + + @property + def is_devrelease(self) -> bool: + return False + + +_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) + +_legacy_version_replacement_map = { + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", +} + + +def _parse_version_parts(s: str) -> Iterator[str]: + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version: str) -> LegacyCmpKey: + + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts: List[str] = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + + return epoch, tuple(parts) + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? 
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
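Because ``__str__`` reassembles the version from its parsed segments, it doubles as a normalizer: any alternate spelling accepted by ``VERSION_PATTERN`` comes back in canonical PEP 440 form. A few illustrative round-trips::

    from packaging.version import Version

    print(str(Version("1.0.0-ALPHA1")))   # -> 1.0.0a1      (case and spelling normalized)
    print(str(Version("1.0.0-2")))        # -> 1.0.0.post2  (implicit post release)
    print(str(Version("v1.0+Ubuntu.1")))  # -> 1.0+ubuntu.1 (leading 'v' dropped, local lowercased)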
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
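Taken together, the properties above decompose a version into its PEP 440 segments. A quick sketch of what each returns for one example input::

    from packaging.version import Version

    v = Version("1!2.3.4rc1.post2.dev3+local.7")

    print(v.epoch, v.release, v.pre, v.post, v.dev, v.local)
    # -> 1 (2, 3, 4) ('rc', 1) 2 3 local.7
    print(v.public)                   # -> 1!2.3.4rc1.post2.dev3
    print(v.base_version)             # -> 1!2.3.4
    print(v.major, v.minor, v.micro)  # -> 2 3 4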
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
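The normalization rules above are easiest to see as a table of inputs and outputs; a brief sketch::

    _parse_letter_version("ALPHA", None)  # -> ('a', 0)     implicit pre-release number
    _parse_letter_version("preview", 4)   # -> ('rc', 4)    alternate spelling normalized
    _parse_letter_version("rev", "1")     # -> ('post', 1)
    _parse_letter_version(None, "2")      # -> ('post', 2)  implicit post syntax, e.g. 1.0-2
    _parse_letter_version(None, None)     # -> None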
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then re-reverse the rest back
+    # into the correct order, make it a tuple, and use that for our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
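The net effect of the infinity sentinels above is the full PEP 440 ordering across release types: for the same release tuple, dev releases sort first and post releases last, with local versions just after their public counterpart. A short sketch of the resulting order::

    from packaging.version import Version

    versions = ["1.0.post1", "1.0", "1.0rc1", "1.0.dev0", "1.0rc1.dev0", "1.0+local"]
    print(sorted(versions, key=Version))
    # -> ['1.0.dev0', '1.0rc1.dev0', '1.0rc1', '1.0', '1.0+local', '1.0.post1']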
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE
new file mode 100644
index 000000000..1bf98523e
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO
new file mode 100644
index 000000000..bdfed0b61
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO
@@ -0,0 +1,109 @@
+Metadata-Version: 2.1
+Name: pyparsing
+Version: 3.0.6
+Summary: Python parsing module
+Home-page: https://github.com/pyparsing/pyparsing/
+Author: Paul McGuire
+Author-email: ptmcg.gm+pyparsing@gmail.com
+License: MIT License
+Download-URL: https://pypi.org/project/pyparsing/
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+Provides-Extra: diagrams
+Requires-Dist: jinja2 ; extra == 'diagrams'
+Requires-Dist: railroad-diagrams ; extra == 'diagrams'
+
+PyParsing -- A Python Parsing Module
+====================================
+
+|Build Status| |Coverage|
+
+Introduction
+============
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions. The pyparsing module provides a library of
+classes that client code uses to construct the grammar directly in
+Python code.
+
+*[Since first writing this description of pyparsing in late 2003, this
+technique for developing parsers has become more widespread, under the
+name Parsing Expression Grammars - PEGs. See more information on PEGs*
+`here <https://en.wikipedia.org/wiki/Parsing_expression_grammar>`__
+*.]*
+
+Here is a program to parse ``"Hello, World!"`` (or any greeting of the form
+``"salutation, addressee!"``):
+
+.. code:: python
+
+    from pyparsing import Word, alphas
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+    hello = "Hello, World!"
+    print(hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of '+', '|' and '^' operator
+definitions.
+
+The parsed results returned from ``parseString()`` are a collection of type
+``ParseResults``, which can be accessed as a
+nested list, a dictionary, or an object with named attributes.
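For example, with results names attached (the names below are illustrative), the same result can be read all three ways:

.. code:: python

    from pyparsing import Word, alphas

    greet = Word(alphas)("salutation") + "," + Word(alphas)("addressee") + "!"
    result = greet.parseString("Hello, World!")

    print(result[0])            # list-style access      -> 'Hello'
    print(result["addressee"])  # dict-style access      -> 'World'
    print(result.salutation)    # attribute-style access -> 'Hello'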
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+- extra or missing whitespace (the above program will also handle ``"Hello,World!"``, ``"Hello , World !"``, etc.)
+- quoted strings
+- embedded comments
+
+The examples directory includes a simple SQL parser, simple CORBA IDL
+parser, a config file parser, a chemical formula parser, and a four-
+function algebraic notation parser, among many others.
+
+Documentation
+=============
+
+There are many examples in the online docstrings of the classes
+and methods in pyparsing. You can find them compiled into `online docs
+<https://pyparsing-docs.readthedocs.io/en/latest/>`__. Additional
+documentation resources and project info are listed in the online
+`GitHub wiki <https://github.com/pyparsing/pyparsing/wiki>`__. An
+entire directory of examples can be found `here
+<https://github.com/pyparsing/pyparsing/tree/master/examples>`__.
+
+License
+=======
+
+MIT License. See header of the `pyparsing.py <https://github.com/pyparsing/pyparsing/blob/master/pyparsing/__init__.py>`__ file.
+
+History
+=======
+
+See `CHANGES <https://github.com/pyparsing/pyparsing/blob/master/CHANGES>`__ file.
+
+.. |Build Status| image:: https://travis-ci.com/pyparsing/pyparsing.svg?branch=master
+   :target: https://travis-ci.com/pyparsing/pyparsing
+.. |Coverage| image:: https://codecov.io/gh/pyparsing/pyparsing/branch/master/graph/badge.svg
+  :target: https://codecov.io/gh/pyparsing/pyparsing
+
+
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD
new file mode 100644
index 000000000..133234be2
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD
@@ -0,0 +1,17 @@
+pyparsing/__init__.py,sha256=3P-TNmKx_H__Ygk98CCZML_ksmPr5ZeSkhigL6RzUPQ,9095
+pyparsing/actions.py,sha256=60v7mETOBzc01YPH_qQD5isavgcSJpAfIKpzgjM3vaU,6429
+pyparsing/common.py,sha256=lFL97ooIeR75CmW5hjURZqwDCTgruqltcTCZ-ulLO2Q,12936
+pyparsing/core.py,sha256=SAChE9VFq7e5SZ0ggo__-HtVFxCrast_xO_todzqBZ4,210727
+pyparsing/exceptions.py,sha256=H4D9gqMavqmAFSsdrU_J6bO-jA-T-A7yvtXWZpooIUA,9030
+pyparsing/helpers.py,sha256=rKkeQ2UExJuBfksZhSZKqME9iXhdGdsl7686_M0nwXE,37881
+pyparsing/results.py,sha256=VLYlrNL_wqsJ1EFDffJzpt4MNyEDqKTgXHnb7eKzQXs,25295
+pyparsing/testing.py,sha256=szs8AKZREZMhL0y0vsMfaTVAnpqPHetg6VKJBNmc4QY,13388
+pyparsing/unicode.py,sha256=0QLjg83PQssSC6dkaZRm9wChE10mDi8kYEO4EvDB8qg,10379
+pyparsing/util.py,sha256=U-juTQjXJ0fqLEX3BBZVBlbAMHrQiGUBMojXYNbnGEM,6734
+pyparsing/diagram/__init__.py,sha256=yySG7RAh6JHuM8xewjaZjY4EWlIc6bX6neHxzTOjuoM,22136
+pyparsing/diagram/template.jinja2,sha256=SfQ8SLktSBqI5W1DGcUVH1vdflRD6x2sQBApxrcNg7s,589
+pyparsing-3.0.6.dist-info/LICENSE,sha256=ENUSChaAWAT_2otojCIL-06POXQbVzIGBNRVowngGXI,1023
+pyparsing-3.0.6.dist-info/METADATA,sha256=16W9SlEjUqdw6HHeEjo4bH_g4_Ti8hYzuf68RRYv5Zg,4169
+pyparsing-3.0.6.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
+pyparsing-3.0.6.dist-info/top_level.txt,sha256=eUOjGzJVhlQ3WS2rFAy2mN3LX_7FKTM5GSJ04jfnLmU,10
+pyparsing-3.0.6.dist-info/RECORD,,
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL
new file mode 100644
index 000000000..385faab05
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt
new file mode 100644
index 000000000..e185380d6
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt
@@ -0,0 +1,4 @@
+
+[diagrams]
+jinja2
+railroad-diagrams
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt
new file mode 100644
index 000000000..210dfec50
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt
@@ -0,0 +1 @@
+pyparsing
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py
new file mode 100644
index 000000000..288618fe7
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py
@@ -0,0 +1,328 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2021  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = """
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions.  With pyparsing, you don't need to learn
+a new syntax for defining grammars or matching expressions - the parsing
+module provides a library of classes that you use to construct the
+grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+``", !"``), built up using :class:`Word`,
+:class:`Literal`, and :class:`And` elements
+(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
+and the strings are auto-converted to :class:`Literal` expressions)::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print(hello, "->", greet.parse_string(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of :class:`'+'`,
+:class:`'|'`, :class:`'^'` and :class:`'&'` operators.
+
+The :class:`ParseResults` object returned from
+:class:`ParserElement.parseString` can be
+accessed as a nested list, a dictionary, or an object with named
+attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+  - extra or missing whitespace (the above program will also handle
+    "Hello,World!", "Hello  ,  World  !", etc.)
+  - quoted strings
+  - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes :class:`ParserElement` and :class:`ParseResults` to
+see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+
+ - construct literal match expressions from :class:`Literal` and
+   :class:`CaselessLiteral` classes
+ - construct character word-group expressions using the :class:`Word`
+   class
+ - see how to create repetitive expressions using :class:`ZeroOrMore`
+   and :class:`OneOrMore` classes
+ - use :class:`'+'`, :class:`'|'`, :class:`'^'`,
+   and :class:`'&'` operators to combine simple expressions into
+   more complex ones
+ - associate names with your parsed results using
+   :class:`ParserElement.setResultsName`
+ - access the parsed data, which is returned as a :class:`ParseResults`
+   object
+ - find some helpful expression short-cuts like :class:`delimitedList`
+   and :class:`oneOf`
+ - find more useful common expressions in the :class:`pyparsing_common`
+   namespace class
+"""
+from typing import NamedTuple
+
+
+class version_info(NamedTuple):
+    major: int
+    minor: int
+    micro: int
+    releaselevel: str
+    serial: int
+
+    @property
+    def __version__(self):
+        return "{}.{}.{}".format(self.major, self.minor, self.micro) + (
+            "{}{}{}".format(
+                "r" if self.releaselevel[0] == "c" else "",
+                self.releaselevel[0],
+                self.serial,
+            ),
+            "",
+        )[self.releaselevel == "final"]
+
+    def __str__(self):
+        return "{} {} / {}".format(__name__, self.__version__, __version_time__)
+
+    def __repr__(self):
+        return "{}.{}({})".format(
+            __name__,
+            type(self).__name__,
+            ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
+        )
+
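The ``__version__`` property above selects between a release-level suffix and the empty string by indexing a two-tuple with a boolean, which is easy to misread; two illustrative evaluations::

    version_info(3, 0, 6, "final", 0).__version__      # -> '3.0.6'
    version_info(3, 0, 7, "candidate", 1).__version__  # -> '3.0.7rc1'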
+
+__version_info__ = version_info(3, 0, 6, "final", 0)
+__version_time__ = "12 Nov 2021 16:06 UTC"
+__version__ = __version_info__.__version__
+__versionTime__ = __version_time__
+__author__ = "Paul McGuire "
+
+from .util import *
+from .exceptions import *
+from .actions import *
+from .core import __diag__, __compat__
+from .results import *
+from .core import *
+from .core import _builtin_exprs as core_builtin_exprs
+from .helpers import *
+from .helpers import _builtin_exprs as helper_builtin_exprs
+
+from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
+from .testing import pyparsing_test as testing
+from .common import (
+    pyparsing_common as common,
+    _builtin_exprs as common_builtin_exprs,
+)
+
+# define backward compat synonyms
+if "pyparsing_unicode" not in globals():
+    pyparsing_unicode = unicode
+if "pyparsing_common" not in globals():
+    pyparsing_common = common
+if "pyparsing_test" not in globals():
+    pyparsing_test = testing
+
+core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
+
+
+__all__ = [
+    "__version__",
+    "__version_time__",
+    "__author__",
+    "__compat__",
+    "__diag__",
+    "And",
+    "AtLineStart",
+    "AtStringStart",
+    "CaselessKeyword",
+    "CaselessLiteral",
+    "CharsNotIn",
+    "Combine",
+    "Dict",
+    "Each",
+    "Empty",
+    "FollowedBy",
+    "Forward",
+    "GoToColumn",
+    "Group",
+    "IndentedBlock",
+    "Keyword",
+    "LineEnd",
+    "LineStart",
+    "Literal",
+    "Located",
+    "PrecededBy",
+    "MatchFirst",
+    "NoMatch",
+    "NotAny",
+    "OneOrMore",
+    "OnlyOnce",
+    "OpAssoc",
+    "Opt",
+    "Optional",
+    "Or",
+    "ParseBaseException",
+    "ParseElementEnhance",
+    "ParseException",
+    "ParseExpression",
+    "ParseFatalException",
+    "ParseResults",
+    "ParseSyntaxException",
+    "ParserElement",
+    "PositionToken",
+    "QuotedString",
+    "RecursiveGrammarException",
+    "Regex",
+    "SkipTo",
+    "StringEnd",
+    "StringStart",
+    "Suppress",
+    "Token",
+    "TokenConverter",
+    "White",
+    "Word",
+    "WordEnd",
+    "WordStart",
+    "ZeroOrMore",
+    "Char",
+    "alphanums",
+    "alphas",
+    "alphas8bit",
+    "any_close_tag",
+    "any_open_tag",
+    "c_style_comment",
+    "col",
+    "common_html_entity",
+    "counted_array",
+    "cpp_style_comment",
+    "dbl_quoted_string",
+    "dbl_slash_comment",
+    "delimited_list",
+    "dict_of",
+    "empty",
+    "hexnums",
+    "html_comment",
+    "identchars",
+    "identbodychars",
+    "java_style_comment",
+    "line",
+    "line_end",
+    "line_start",
+    "lineno",
+    "make_html_tags",
+    "make_xml_tags",
+    "match_only_at_col",
+    "match_previous_expr",
+    "match_previous_literal",
+    "nested_expr",
+    "null_debug_action",
+    "nums",
+    "one_of",
+    "printables",
+    "punc8bit",
+    "python_style_comment",
+    "quoted_string",
+    "remove_quotes",
+    "replace_with",
+    "replace_html_entity",
+    "rest_of_line",
+    "sgl_quoted_string",
+    "srange",
+    "string_end",
+    "string_start",
+    "trace_parse_action",
+    "unicode_string",
+    "with_attribute",
+    "indentedBlock",
+    "original_text_for",
+    "ungroup",
+    "infix_notation",
+    "locatedExpr",
+    "with_class",
+    "CloseMatch",
+    "token_map",
+    "pyparsing_common",
+    "pyparsing_unicode",
+    "unicode_set",
+    "condition_as_parse_action",
+    "pyparsing_test",
+    # pre-PEP8 compatibility names
+    "__versionTime__",
+    "anyCloseTag",
+    "anyOpenTag",
+    "cStyleComment",
+    "commonHTMLEntity",
+    "countedArray",
+    "cppStyleComment",
+    "dblQuotedString",
+    "dblSlashComment",
+    "delimitedList",
+    "dictOf",
+    "htmlComment",
+    "javaStyleComment",
+    "lineEnd",
+    "lineStart",
+    "makeHTMLTags",
+    "makeXMLTags",
+    "matchOnlyAtCol",
+    "matchPreviousExpr",
+    "matchPreviousLiteral",
+    "nestedExpr",
+    "nullDebugAction",
+    "oneOf",
+    "opAssoc",
+    "pythonStyleComment",
+    "quotedString",
+    "removeQuotes",
+    "replaceHTMLEntity",
+    "replaceWith",
+    "restOfLine",
+    "sglQuotedString",
+    "stringEnd",
+    "stringStart",
+    "traceParseAction",
+    "unicodeString",
+    "withAttribute",
+    "indentedBlock",
+    "originalTextFor",
+    "infixNotation",
+    "locatedExpr",
+    "withClass",
+    "tokenMap",
+    "conditionAsParseAction",
+    "autoname_elements",
+]
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py
new file mode 100644
index 000000000..2bcc5502b
--- /dev/null
+++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py
@@ -0,0 +1,207 @@
+# actions.py
+
+from .exceptions import ParseException
+from .util import col
+
+
+class OnlyOnce:
+    """
+    Wrapper for parse actions, to ensure they are only called once.
+    """
+
+    def __init__(self, method_call):
+        from .core import _trim_arity
+
+        self.callable = _trim_arity(method_call)
+        self.called = False
+
+    def __call__(self, s, l, t):
+        if not self.called:
+            results = self.callable(s, l, t)
+            self.called = True
+            return results
+        raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
+
+    def reset(self):
+        """
+        Allow the associated parse action to be called once more.
+        """
+
+        self.called = False
+
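``OnlyOnce`` is easiest to understand with its call-once-then-reset cycle in view; a minimal sketch (the grammar here is illustrative)::

    from pyparsing import OnlyOnce, ParseException, Word, nums

    def report(tokens):
        print("saw", tokens[0])

    once = OnlyOnce(report)
    integer = Word(nums).add_parse_action(once)

    integer.parse_string("1")        # prints: saw 1
    try:
        integer.parse_string("2")    # second call raises -> parse fails
    except ParseException:
        print("blocked until reset")
    once.reset()
    integer.parse_string("3")        # prints: saw 3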
+
+def match_only_at_col(n):
+    """
+    Helper method for defining parse actions that require matching at
+    a specific column in the input text.
+    """
+
+    def verify_col(strg, locn, toks):
+        if col(locn, strg) != n:
+            raise ParseException(strg, locn, "matched token not at column {}".format(n))
+
+    return verify_col
+
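A brief sketch of attaching the returned checker as a parse action (the fixed-column layout is invented for illustration; ``col`` positions are 1-based)::

    from pyparsing import Word, nums, match_only_at_col

    # accept an integer only when it begins in column 9
    col9_int = Word(nums).add_parse_action(match_only_at_col(9))
    print(col9_int.parse_string("        42"))  # -> ['42']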
+
+def replace_with(repl_str):
+    """
+    Helper method for common parse actions that simply return
+    a literal value.  Especially useful when used with
+    :class:`transform_string` ().
+
+    Example::
+
+        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
+        term = na | num
+
+        OneOrMore(term).parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s, l, t: [repl_str]
+
+
+def remove_quotes(s, l, t):
+    """
+    Helper parse action for removing quotation marks from parsed
+    quoted strings.
+
+    Example::
+
+        # by default, quotation marks are included in parsed results
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use remove_quotes to strip quotation marks from parsed results
+        quoted_string.set_parse_action(remove_quotes)
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+
+def with_attribute(*args, **attr_dict):
+    """
+    Helper to create a validating parse action to be used with start
+    tags created with :class:`make_xml_tags` or
+    :class:`make_html_tags`. Use ``with_attribute`` to qualify
+    a starting tag with a required attribute value, to avoid false
+    matches on common tags such as ``<TD>`` or ``<DIV>
``. + + Call ``with_attribute`` with a series of attribute names and + values. Specify the list of filter attributes names and values as: + + - keyword arguments, as in ``(align="right")``, or + - as an explicit dict with ``**`` operator, when an attribute + name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` + + For attribute names with a namespace prefix, you must use the second + form. Attribute names are matched insensitive to upper/lower case. + + If just testing for ``class`` (with or without a namespace), use + :class:`with_class`. + + To verify that the attribute exists, but without specifying a value, + pass ``with_attribute.ANY_VALUE`` as the value. + + Example:: + + html = ''' +
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0</div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
+ + ''' + div,div_end = make_html_tags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().set_parse_action(with_attribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attr_dict.items() + attrs = [(k, v) for k, v in attrs] + + def pa(s, l, tokens): + for attrName, attrValue in attrs: + if attrName not in tokens: + raise ParseException(s, l, "no matching attribute " + attrName) + if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException( + s, + l, + "attribute {!r} has value {!r}, must be {!r}".format( + attrName, tokens[attrName], attrValue + ), + ) + + return pa + + +with_attribute.ANY_VALUE = object() + + +def with_class(classname, namespace=""): + """ + Simplified version of :class:`with_attribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. + + Example:: + + html = ''' +
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0</div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+ + ''' + div,div_end = make_html_tags("div") + div_grid = div().set_parse_action(with_class("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "{}:class".format(namespace) if namespace else "class" + return with_attribute(**{classattr: classname}) + + +# pre-PEP8 compatibility symbols +replaceWith = replace_with +removeQuotes = remove_quotes +withAttribute = with_attribute +withClass = with_class +matchOnlyAtCol = match_only_at_col diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py new file mode 100644 index 000000000..1859fb79c --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py @@ -0,0 +1,424 @@ +# common.py +from .core import * +from .helpers import delimited_list, any_open_tag, any_close_tag +from datetime import datetime + + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers`, :class:`reals`, + :class:`scientific notation`) + - common :class:`programming identifiers` + - network addresses (:class:`MAC`, + :class:`IPv4`, :class:`IPv6`) + - ISO8601 :class:`dates` and + :class:`datetime` + - :class:`UUID` + - :class:`comma-separated list` + - :class:`url` + + Parse actions: + + - :class:`convertToInteger` + - :class:`convertToFloat` + - :class:`convertToDate` + - :class:`convertToDatetime` + - :class:`stripHTMLTags` + - :class:`upcaseTokens` + - :class:`downcaseTokens` + + Example:: + + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + prints:: + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convert_to_integer = token_map(int) + """ + Parse action for converting parsed integers to Python int + """ + + convert_to_float = token_map(float) + """ + Parse action 
for converting parsed numbers to Python float + """ + + integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = ( + Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) + ) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = ( + Regex(r"[+-]?\d+") + .set_name("signed integer") + .set_parse_action(convert_to_integer) + ) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = ( + signed_integer().set_parse_action(convert_to_float) + + "/" + + signed_integer().set_parse_action(convert_to_float) + ).set_name("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) + + mixed_integer = ( + fraction | signed_integer + Opt(Opt("-").suppress() + fraction) + ).set_name("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.add_parse_action(sum) + + real = ( + Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") + .set_name("real number") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number and returns a float""" + + sci_real = ( + Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") + .set_name("real number with scientific notation") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number with optional + scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).setName("number").streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = ( + Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") + .set_name("fnumber") + .set_parse_action(convert_to_float) + ) + """any int or real number, returned as float""" + + identifier = Word(identchars, identbodychars).set_name("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex( + r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" + ).set_name("IPv4 address") + "IPv4 address (``0.0.0.0 - 255.255.255.255``)" + + _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") + _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( + "full IPv6 address" + ) + _short_ipv6_address = ( + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + + "::" + + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + ).set_name("short IPv6 address") + _short_ipv6_address.add_condition( + lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 + ) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") + ipv6_address = Combine( + (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( + "IPv6 address" + ) + ).set_name("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex( + r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" + ).set_name("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convert_to_date(fmt: str = "%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) + + Example:: + + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + + prints:: + + [datetime.date(1999, 12, 31)] + """ + + def cvt_fn(ss, ll, tt): + try: + return datetime.strptime(tt[0], fmt).date() + except ValueError as ve: + raise ParseException(ss, ll, str(ve)) + + return cvt_fn + + @staticmethod + def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) + + Example:: + + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + + prints:: + + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + + def cvt_fn(s, l, t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + + return cvt_fn + + iso8601_date = Regex( + r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" + ).set_name("ISO8601 date") + "ISO8601 date (``yyyy-mm-dd``)" + + iso8601_datetime = Regex( + r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" + ).set_name("ISO8601 datetime") + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" + + uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" + + _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() + + @staticmethod + def strip_html_tags(s: str, l: int, tokens: ParseResults): + """Parse action to remove HTML tags from web page HTML source + + Example:: + + # strip HTML links from normal text + text = 'More info at the pyparsing wiki page' + td, td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + print(table_text.parseString(text).body) + + Prints:: + + More info at the pyparsing wiki page + """ + return pyparsing_common._html_stripper.transform_string(tokens[0]) + + _commasepitem = ( + Combine( + OneOrMore( + ~Literal(",") + + ~LineEnd() + + Word(printables, exclude_chars=",") + + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) + ) + ) + .streamline() + .set_name("commaItem") + ) + comma_separated_list = delimited_list( + Opt(quoted_string.copy() | _commasepitem, default="") + ).set_name("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcase_tokens = staticmethod(token_map(lambda t: t.upper())) + """Parse action to convert tokens to upper case.""" + + downcase_tokens = staticmethod(token_map(lambda t: t.lower())) + """Parse action to convert tokens to lower case.""" + + # fmt: off + url = Regex( + # https://mathiasbynens.be/demo/url-regex + # https://gist.github.com/dperini/729294 + r"^" + + # protocol identifier (optional) + # short syntax // still required + r"(?:(?:(?Phttps?|ftp):)?\/\/)" + + # user:pass BasicAuth (optional) + 
r"(?:(?P\S+(?::\S*)?)@)?" + + r"(?P" + + # IP address exclusion + # private & local networks + r"(?!(?:10|127)(?:\.\d{1,3}){3})" + + r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + + r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + + # IP address dotted notation octets + # excludes loopback network 0.0.0.0 + # excludes reserved space >= 224.0.0.0 + # excludes network & broadcast addresses + # (first & last IP address of each class) + r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + + r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + + r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + + r"|" + + # host & domain names, may end with dot + # can be replaced by a shortest alternative + # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ + r"(?:" + + r"(?:" + + r"[a-z0-9\u00a1-\uffff]" + + r"[a-z0-9\u00a1-\uffff_-]{0,62}" + + r")?" + + r"[a-z0-9\u00a1-\uffff]\." + + r")+" + + # TLD identifier name, may end with dot + r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + + r")" + + # port number (optional) + r"(:(?P\d{2,5}))?" + + # resource path (optional) + r"(?P\/[^?# ]*)?" + + # query string (optional) + r"(\?(?P[^#]*))?" + + # fragment (optional) + r"(#(?P\S*))?" + + r"$" + ).set_name("url") + # fmt: on + + # pre-PEP8 compatibility names + convertToInteger = convert_to_integer + convertToFloat = convert_to_float + convertToDate = convert_to_date + convertToDatetime = convert_to_datetime + stripHTMLTags = strip_html_tags + upcaseTokens = upcase_tokens + downcaseTokens = downcase_tokens + + +_builtin_exprs = [ + v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) +] diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py new file mode 100644 index 000000000..ff24eee50 --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py @@ -0,0 +1,5772 @@ +# +# core.py +# +import os +from typing import ( + Optional as OptionalType, + Iterable as IterableType, + Union, + Callable, + Any, + Generator, + Tuple, + List, + TextIO, + Set, + Dict as DictType, +) +from abc import ABC, abstractmethod +from enum import Enum +import string +import copy +import warnings +import re +import sre_constants +import sys +from collections.abc import Iterable +import traceback +import types +from operator import itemgetter +from functools import wraps +from threading import RLock +from pathlib import Path + +from .util import ( + _FifoCache, + _UnboundedCache, + __config_flags, + _collapse_string_to_ranges, + _escape_regex_range_chars, + _bslash, + _flatten, + LRUMemo as _LRUMemo, + UnboundedMemo as _UnboundedMemo, +) +from .exceptions import * +from .actions import * +from .results import ParseResults, _ParseResultsWithOffset +from .unicode import pyparsing_unicode + +_MAX_INT = sys.maxsize +str_type: Tuple[type, ...] = (str, bytes) + +# +# Copyright (c) 2003-2021 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + + +class __compat__(__config_flags): + """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. + + - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; + maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 + behavior + """ + + _type_desc = "compatibility" + + collect_all_And_tokens = True + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _fixed_names = """ + collect_all_And_tokens + """.split() + + +class __diag__(__config_flags): + _type_desc = "diagnostic" + + warn_multiple_tokens_in_named_alternation = False + warn_ungrouped_named_tokens_in_collection = False + warn_name_set_on_empty_Forward = False + warn_on_parse_using_empty_Forward = False + warn_on_assignment_to_Forward = False + warn_on_multiple_string_args_to_oneof = False + warn_on_match_first_with_lshift_operator = False + enable_debug_on_named_expressions = False + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _warning_names = [name for name in _all_names if name.startswith("warn")] + _debug_names = [name for name in _all_names if name.startswith("enable_debug")] + + @classmethod + def enable_all_warnings(cls): + for name in cls._warning_names: + cls.enable(name) + + +class Diagnostics(Enum): + """ + Diagnostic configuration (all default to disabled) + - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results + name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions + - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined + with a results name, but has no contents defined + - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is + defined in a grammar but has never had an expression attached to it + - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined + but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` + - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is + incorrectly called with multiple str arguments + - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent + calls to :class:`ParserElement.set_name` + + Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. + All warnings can be enabled by calling :class:`enable_all_warnings`. 
+ """ + + warn_multiple_tokens_in_named_alternation = 0 + warn_ungrouped_named_tokens_in_collection = 1 + warn_name_set_on_empty_Forward = 2 + warn_on_parse_using_empty_Forward = 3 + warn_on_assignment_to_Forward = 4 + warn_on_multiple_string_args_to_oneof = 5 + warn_on_match_first_with_lshift_operator = 6 + enable_debug_on_named_expressions = 7 + + +def enable_diag(diag_enum): + """ + Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.enable(diag_enum.name) + + +def disable_diag(diag_enum): + """ + Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.disable(diag_enum.name) + + +def enable_all_warnings(): + """ + Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). + """ + __diag__.enable_all_warnings() + + +# hide abstract class +del __config_flags + + +def _should_enable_warnings( + cmd_line_warn_options: List[str], warn_env_var: OptionalType[str] +) -> bool: + enable = bool(warn_env_var) + for warn_opt in cmd_line_warn_options: + w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( + ":" + )[:5] + if not w_action.lower().startswith("i") and ( + not (w_message or w_category or w_module) or w_module == "pyparsing" + ): + enable = True + elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): + enable = False + return enable + + +if _should_enable_warnings( + sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") +): + enable_all_warnings() + + +# build list of single arg builtins, that can be used as parse actions +_single_arg_builtins = { + sum, + len, + sorted, + reversed, + list, + tuple, + set, + any, + all, + min, + max, +} + +_generatorType = types.GeneratorType +ParseAction = Union[ + Callable[[], Any], + Callable[[ParseResults], Any], + Callable[[int, ParseResults], Any], + Callable[[str, int, ParseResults], Any], +] +ParseCondition = Union[ + Callable[[], bool], + Callable[[ParseResults], bool], + Callable[[int, ParseResults], bool], + Callable[[str, int, ParseResults], bool], +] +ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] +DebugStartAction = Callable[[str, int, "ParserElement", bool], None] +DebugSuccessAction = Callable[ + [str, int, int, "ParserElement", ParseResults, bool], None +] +DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] + + +alphas = string.ascii_uppercase + string.ascii_lowercase +identchars = pyparsing_unicode.Latin1.identchars +identbodychars = pyparsing_unicode.Latin1.identbodychars +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +printables = "".join(c for c in string.printable if c not in string.whitespace) + +_trim_arity_call_line = None + + +def _trim_arity(func, maxargs=2): + """decorator to trim function calls to match the arity of the target""" + global _trim_arity_call_line + + if func in _single_arg_builtins: + return lambda s, l, t: func(t) + + limit = 0 + found_arity = False + + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [frame_summary[:2]] + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 11 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ _trim_arity_call_line = ( + _trim_arity_call_line or traceback.extract_stack(limit=2)[-1] + ) + pa_call_line_synth = ( + _trim_arity_call_line[0], + _trim_arity_call_line[1] + LINE_DIFF, + ) + + def wrapper(*args): + nonlocal found_arity, limit + while 1: + try: + ret = func(*args[limit:]) + found_arity = True + return ret + except TypeError as te: + # re-raise TypeErrors if they did not come from our arity testing + if found_arity: + raise + else: + tb = te.__traceback__ + trim_arity_type_error = ( + extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth + ) + del tb + + if trim_arity_type_error: + if limit <= maxargs: + limit += 1 + continue + + raise + + # copy func name to wrapper for sensible debug output + # (can't use functools.wraps, since that messes with function signature) + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + wrapper.__name__ = func_name + + return wrapper + + +def condition_as_parse_action( + fn: ParseCondition, message: str = None, fatal: bool = False +): + """ + Function to convert a simple predicate function that returns ``True`` or ``False`` + into a parse action. Can be used in places when a parse action is required + and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition + to an operator level in :class:`infix_notation`). + + Optional keyword arguments: + + - ``message`` - define a custom message to be used in the raised exception + - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; + otherwise will raise :class:`ParseException` + + """ + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + + +def _default_start_debug_action( + instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False +): + cache_hit_str = "*" if cache_hit else "" + print( + ( + "{}Match {} at loc {}({},{})\n {}\n {}^".format( + cache_hit_str, + expr, + loc, + lineno(loc, instring), + col(loc, instring), + line(loc, instring), + " " * (col(loc, instring) - 1), + ) + ) + ) + + +def _default_success_debug_action( + instring: str, + startloc: int, + endloc: int, + expr: "ParserElement", + toks: ParseResults, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) + + +def _default_exception_debug_action( + instring: str, + loc: int, + expr: "ParserElement", + exc: Exception, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print( + "{}Match {} failed, {} raised: {}".format( + cache_hit_str, expr, type(exc).__name__, exc + ) + ) + + +def null_debug_action(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + + +class ParserElement(ABC): + """Abstract base level parser element class.""" + + DEFAULT_WHITE_CHARS: str = " \n\t\r" + verbose_stacktrace: bool = False + _literalStringClass: OptionalType[type] = None + + @staticmethod + def set_default_whitespace_chars(chars: str): + r""" + Overrides the default whitespace chars + + Example:: + + # default whitespace chars are space, and newline + OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.set_default_whitespace_chars(" \t") + 
OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + # update whitespace all parse expressions defined in this module + for expr in _builtin_exprs: + if expr.copyDefaultWhiteChars: + expr.whiteChars = set(chars) + + @staticmethod + def inline_literals_using(cls: type): + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inline_literals_using(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + def __init__(self, savelist: bool = False): + self.parseAction: List[ParseAction] = list() + self.failAction: OptionalType[ParseFailAction] = None + self.customName = None + self._defaultName = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars = True + # used when checking for left-recursion + self.mayReturnEmpty = False + self.keepTabs = False + self.ignoreExprs: List["ParserElement"] = list() + self.debug = False + self.streamlined = False + # optimize exception handling for subclasses that don't advance parse index + self.mayIndexError = True + self.errmsg = "" + # mark results names as modal (report only last) or cumulative (list all) + self.modalResults = True + # custom debug actions + self.debugActions: Tuple[ + OptionalType[DebugStartAction], + OptionalType[DebugSuccessAction], + OptionalType[DebugExceptionAction], + ] = (None, None, None) + self.re = None + # avoid redundant calls to preParse + self.callPreparse = True + self.callDuringTry = False + self.suppress_warnings_ = [] + + def suppress_warning(self, warning_type: Diagnostics): + """ + Suppress warnings emitted for a particular diagnostic on this expression. + + Example:: + + base = pp.Forward() + base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) + + # statement would normally raise a warning, but is now suppressed + print(base.parseString("x")) + + """ + self.suppress_warnings_.append(warning_type) + return self + + def copy(self) -> "ParserElement": + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. 
+ + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parse_string("5K 100 640K 256M")) + + prints:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + + integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + """ + cpy = copy.copy(self) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + return cpy + + def set_results_name( + self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False + ) -> "ParserElement": + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + + Normally, results names are assigned as you would assign keys in a dict: + any existing value is overwritten by later values. If it is necessary to + keep all values captured for a particular results name, call ``set_results_name`` + with ``list_all_matches`` = True. + + NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.set_results_name("name")`` + - see :class:`__call__`. If ``list_all_matches`` is required, use + ``expr("name*")``. + + Example:: + + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + listAllMatches = listAllMatches or list_all_matches + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, listAllMatches=False): + if name is None: + return self + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches = True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def set_break(self, break_flag: bool = True) -> "ParserElement": + """ + Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to + disable. + """ + if break_flag: + _parseMethod = self._parse + + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + + # this call to pdb.set_trace() is intentional, not a checkin error + pdb.set_trace() + return _parseMethod(instring, loc, doActions, callPreParse) + + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def set_parse_action( + self, *fns: ParseAction, **kwargs + ) -> OptionalType["ParserElement"]: + """ + Define one or more actions to perform when successfully matching parse element definition. + + Parse actions can be called to perform data conversions, do extra validation, + update external data structures, or enhance or replace the parsed tokens. 
+ Each parse action ``fn`` is a callable method with 0-3 arguments, called as
+ ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
+
+ - s = the original string being parsed (see note below)
+ - loc = the location of the matching substring
+ - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
+
+ The parsed tokens are passed to the parse action as ParseResults. They can be
+ modified in place using list-style append, extend, and pop operations to update
+ the parsed list elements; and with dictionary-style item set and del operations
+ to add, update, or remove any named results. If the tokens are modified in place,
+ it is not necessary to return them with a return statement.
+
+ Parse actions can also completely replace the given tokens, with another ``ParseResults``
+ object, or with some entirely different object (common for parse actions that perform data
+ conversions). A convenient way to build a new parse result is to define the values
+ using a dict, and then create the return value using :class:`ParseResults.from_dict`.
+
+ If None is passed as the ``fn`` parse action, all previously added parse actions for this
+ expression are cleared.
+
+ Optional keyword arguments:
+
+ - call_during_try = (default= ``False``) indicate if parse action should be run during
+ lookaheads and alternate testing. For parse actions that have side effects, it is
+ important to only call the parse action once it is determined that it is being
+ called as part of a successful parse. For parse actions that perform additional
+ validation, call_during_try should be passed as True, so that the validation
+ code is included in the preliminary "try" parses.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See :class:`parse_string` for more
+ information on parsing strings containing ``<TAB>`` s, and suggested
+ methods to maintain a consistent view of the parsed string, the parse
+ location, and line and column positions within the parsed string.
+
+ Example::
+
+ # parse dates in the form YYYY/MM/DD
+
+ # use parse action to convert toks from str to int at parse time
+ def convert_to_int(toks):
+ return int(toks[0])
+
+ # use a parse action to verify that the date is a valid date
+ def is_valid_date(toks):
+ from datetime import date
+ year, month, day = toks[::2]
+ try:
+ date(year, month, day)
+ except ValueError:
+ raise ParseException("invalid date given")
+
+ integer = Word(nums)
+ date_str = integer + '/' + integer + '/' + integer
+
+ # add parse actions
+ integer.set_parse_action(convert_to_int)
+ date_str.set_parse_action(is_valid_date)
+
+ # note that integer fields are now ints, not strings
+ date_str.run_tests('''
+ # successful parse - note that integer fields were converted to ints
+ 1999/12/31
+
+ # fail - invalid date
+ 1999/13/31
+ ''')
+ """
+ if list(fns) == [None]:
+ self.parseAction = []
+ else:
+ if not all(callable(fn) for fn in fns):
+ raise TypeError("parse actions must be callable")
+ self.parseAction = list(map(_trim_arity, list(fns)))
+ self.callDuringTry = kwargs.get(
+ "call_during_try", kwargs.get("callDuringTry", False)
+ )
+ return self
+
+ def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
+ """
+ Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
+
+ See examples in :class:`copy`.
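+
+ Example (an illustrative sketch; ``integer`` here is a hypothetical element
+ defined inline, not a pyparsing built-in)::
+
+ integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+ # append a second action that runs on the already-converted value
+ integer.add_parse_action(lambda toks: toks[0] * 2)
+
+ integer.parse_string("10") # -> [20]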
+ """ + self.parseAction += list(map(_trim_arity, list(fns))) + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": + """Add a boolean predicate function to expression's list of parse actions. See + :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, + functions passed to ``add_condition`` need to return boolean success/fail of the condition. + + Optional keyword arguments: + + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise + ParseException + - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, + default=False + + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), + (line:1, col:1) + """ + for fn in fns: + self.parseAction.append( + condition_as_parse_action( + fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) + ) + ) + + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": + """ + Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + ``fn(s, loc, expr, err)`` where: + + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + + The function returns no value. 
It may throw :class:`ParseFatalException` + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables(self, instring, loc): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc, dummy = e._parse(instring, loc) + exprsFound = True + except ParseException: + pass + return loc + + def preParse(self, instring, loc): + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + + if self.skipWhitespace: + instrlen = len(instring) + white_chars = self.whiteChars + while loc < instrlen and instring[loc] in white_chars: + loc += 1 + + return loc + + def parseImpl(self, instring, loc, doActions=True): + return loc, [] + + def postParse(self, instring, loc, tokenlist): + return tokenlist + + # @profile + def _parseNoCache( + self, instring, loc, doActions=True, callPreParse=True + ) -> Tuple[int, ParseResults]: + TRY, MATCH, FAIL = 0, 1, 2 + debugging = self.debug # and doActions) + len_instring = len(instring) + + if debugging or self.failAction: + # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) + try: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.debugActions[TRY]: + self.debugActions[TRY](instring, tokens_start, self) + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except Exception as err: + # print("Exception raised:", err) + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokens_start, self, err) + if self.failAction: + self.failAction(instring, tokens_start, self, err) + raise + else: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + + tokens = self.postParse(instring, loc, tokens) + + ret_tokens = ParseResults( + tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults + ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + except Exception as err: + # print "Exception raised in user parse action:", err + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokens_start, self, err) + raise + else: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + 
asList=self.saveAsList
+ and isinstance(tokens, (ParseResults, list)),
+ modal=self.modalResults,
+ )
+ if debugging:
+ # print("Matched", self, "->", ret_tokens.as_list())
+ if self.debugActions[MATCH]:
+ self.debugActions[MATCH](instring, tokens_start, loc, self, ret_tokens)
+
+ return loc, ret_tokens
+
+ def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
+ try:
+ return self._parse(instring, loc, doActions=False)[0]
+ except ParseFatalException:
+ if raise_fatal:
+ raise
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ def can_parse_next(self, instring: str, loc: int) -> bool:
+ try:
+ self.try_parse(instring, loc)
+ except (ParseException, IndexError):
+ return False
+ else:
+ return True
+
+ # cache for left-recursion in Forward references
+ recursion_lock = RLock()
+ recursion_memos: DictType[
+ Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
+ ] = {}
+
+ # argument cache for optimizing repeated calls when backtracking through recursive expressions
+ packrat_cache = (
+ {}
+ ) # this is set later by enable_packrat(); this is here so that reset_cache() doesn't fail
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache(
+ self, instring, loc, doActions=True, callPreParse=True
+ ) -> Tuple[int, ParseResults]:
+ HIT, MISS = 0, 1
+ TRY, MATCH, FAIL = 0, 1, 2
+ lookup = (self, instring, loc, callPreParse, doActions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, doActions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy(), loc))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if self.debug and self.debugActions[TRY]:
+ try:
+ self.debugActions[TRY](instring, loc, self, cache_hit=True)
+ except TypeError:
+ pass
+ if isinstance(value, Exception):
+ if self.debug and self.debugActions[FAIL]:
+ try:
+ self.debugActions[FAIL](
+ instring, loc, self, value, cache_hit=True
+ )
+ except TypeError:
+ pass
+ raise value
+
+ loc_, result, endloc = value[0], value[1].copy(), value[2]
+ if self.debug and self.debugActions[MATCH]:
+ try:
+ self.debugActions[MATCH](
+ instring, loc_, endloc, self, result, cache_hit=True
+ )
+ except TypeError:
+ pass
+
+ return loc_, result
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def reset_cache() -> None:
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(
+ ParserElement.packrat_cache_stats
+ )
+ ParserElement.recursion_memos.clear()
+
+ _packratEnabled = False
+ _left_recursion_enabled = False
+
+ @staticmethod
+ def disable_memoization() -> None:
+ """
+ Disables active Packrat or Left Recursion parsing and their memoization.
+
+ This method also works if neither Packrat nor Left Recursion is enabled.
+ This makes it safe to call before activating Packrat or Left Recursion
+ to clear any previous settings.
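+
+ Example (an illustrative sketch)::
+
+ import pyparsing as pp
+
+ pp.ParserElement.enable_packrat()
+ # ... parse with packrat memoization, then switch strategies
+ pp.ParserElement.disable_memoization()
+ pp.ParserElement.enable_left_recursion()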
+ """ + ParserElement.reset_cache() + ParserElement._left_recursion_enabled = False + ParserElement._packratEnabled = False + ParserElement._parse = ParserElement._parseNoCache + + @staticmethod + def enable_left_recursion( + cache_size_limit: OptionalType[int] = None, *, force=False + ) -> None: + """ + Enables "bounded recursion" parsing, which allows for both direct and indirect + left-recursion. During parsing, left-recursive :class:`Forward` elements are + repeatedly matched with a fixed recursion depth that is gradually increased + until finding the longest match. + + Example:: + + import pyparsing as pp + pp.ParserElement.enable_left_recursion() + + E = pp.Forward("E") + num = pp.Word(pp.nums) + # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... + E <<= E + '+' - num | num + + print(E.parse_string("1+2+3")) + + Recursion search naturally memoizes matches of ``Forward`` elements and may + thus skip reevaluation of parse actions during backtracking. This may break + programs with parse actions which rely on strict ordering of side-effects. + + Parameters: + + - cache_size_limit - (default=``None``) - memoize at most this many + ``Forward`` elements during matching; if ``None`` (the default), + memoize all ``Forward`` elements. + + Bounded Recursion parsing works similar but not identical to Packrat parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. + """ + if force: + ParserElement.disable_memoization() + elif ParserElement._packratEnabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if cache_size_limit is None: + ParserElement.recursion_memos = _UnboundedMemo() + elif cache_size_limit > 0: + ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) + else: + raise NotImplementedError("Memo size of %s" % cache_size_limit) + ParserElement._left_recursion_enabled = True + + @staticmethod + def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: + """ + Enables "packrat" parsing, which adds memoizing to the parsing logic. + Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + Parameters: + + - cache_size_limit - (default= ``128``) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method :class:`ParserElement.enable_packrat`. + For best results, call ``enable_packrat()`` immediately after + importing pyparsing. + + Example:: + + import pyparsing + pyparsing.ParserElement.enable_packrat() + + Packrat parsing works similar but not identical to Bounded Recursion parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. 
+ """ + if force: + ParserElement.disable_memoization() + elif ParserElement._left_recursion_enabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if not ParserElement._packratEnabled: + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = _UnboundedCache() + else: + ParserElement.packrat_cache = _FifoCache(cache_size_limit) + ParserElement._parse = ParserElement._parseCache + + def parse_string( + self, instring: str, parse_all: bool = False, *, parseAll: bool = False + ) -> ParseResults: + """ + Parse a string with respect to the parser definition. This function is intended as the primary interface to the + client code. + + :param instring: The input string to be parsed. + :param parse_all: If set, the entire input string must match the grammar. + :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. + :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. + :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or + an object with attributes if the given parser includes results names. + + If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This + is also equivalent to ending the grammar with :class:`StringEnd`(). + + To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are + converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string + contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string + being parsed, one can ensure a consistent view of the input string by doing one of the following: + + - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), + - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the + parse action's ``s`` argument, or + - explicitly expand the tabs in your input string before calling ``parse_string``. + + Examples: + + By default, partial matches are OK. + + >>> res = Word('a').parse_string('aaaaabaaa') + >>> print(res) + ['aaaaa'] + + The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children + directly to see more examples. + + It raises an exception if parse_all flag is set and instring does not match the whole grammar. + + >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) + Traceback (most recent call last): + ... 
+ pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) + """ + parseAll = parse_all or parseAll + + ParserElement.reset_cache() + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse(instring, 0) + if parseAll: + loc = self.preParse(instring, loc) + se = Empty() + StringEnd() + se._parse(instring, loc) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + raise exc.with_traceback(None) + else: + return tokens + + def scan_string( + self, + instring: str, + max_matches: int = _MAX_INT, + overlap: bool = False, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> Generator[Tuple[ParseResults, int, int], None, None]: + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + ``max_matches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parse_string` for more information on parsing + strings with embedded tabs. + + Example:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens, start, end in Word(alphas).scan_string(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + maxMatches = min(maxMatches, max_matches) + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = str(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn(instring, loc) + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) + except ParseException: + loc = preloc + 1 + else: + if nextLoc > loc: + matches += 1 + if debug: + print( + { + "tokens": tokens.asList(), + "start": preloc, + "end": nextLoc, + } + ) + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn(instring, loc) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc + 1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def transform_string(self, instring: str, *, debug: bool = False) -> str: + """ + Extension to :class:`scan_string`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transform_string``, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking ``transform_string()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transform_string()`` returns the resulting transformed string. 
+ + Example:: + + wd = Word(alphas) + wd.set_parse_action(lambda toks: toks[0].title()) + + print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) + + prints:: + + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out = [] + lastE = 0 + # force preservation of s, to minimize unwanted transformation of string, and to + # keep string locs straight between transform_string and scan_string + self.keepTabs = True + try: + for t, s, e in self.scan_string(instring, debug=debug): + out.append(instring[lastE:s]) + if t: + if isinstance(t, ParseResults): + out += t.as_list() + elif isinstance(t, Iterable) and not isinstance(t, str_type): + out += list(t) + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join(map(str, _flatten(out))) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def search_string( + self, + instring: str, + max_matches: int = _MAX_INT, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> ParseResults: + """ + Another extension to :class:`scan_string`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``max_matches`` argument, to clip searching after 'n' matches are found. + + Example:: + + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) + + prints:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + maxMatches = min(maxMatches, max_matches) + try: + return ParseResults( + [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] + ) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def split( + self, + instring: str, + maxsplit: int = _MAX_INT, + include_separators: bool = False, + *, + includeSeparators=False, + ) -> Generator[str, None, None]: + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``include_separators`` argument (default= ``False``), if the separating + matching text should be included in the split results. + + Example:: + + punc = one_of(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + + prints:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + includeSeparators = includeSeparators or include_separators + last = 0 + for t, s, e in self.scan_string(instring, max_matches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other): + """ + Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` + converts them to :class:`Literal`s by default. + + Example:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + + prints:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. + + Literal('start') + ... + Literal('end') + + is equivalent to: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. + """ + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return And([self, other]) + + def __radd__(self, other): + """ + Implementation of ``+`` operator when left operand is not a :class:`ParserElement` + """ + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other + self + + def __sub__(self, other): + """ + Implementation of ``-`` operator, returns :class:`And` with error stop + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return self + And._ErrorStop() + other + + def __rsub__(self, other): + """ + Implementation of ``-`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other - self + + def __mul__(self, other): + """ + Implementation of ``*`` operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer + tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples + may also include ``None`` as in: + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None, n)`` does not raise an exception if + more than n exprs exist in the input stream; that is, + ``expr*(None, n)`` does not enforce a maximum number of expr + occurrences. 
If this behavior is desired, then write + ``expr*(None, n) + ~expr`` + """ + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0,) + other[1:] + (None,))[:2] + + if isinstance(other, int): + minElements, optElements = other, 0 + elif isinstance(other, tuple): + other = tuple(o if o is not Ellipsis else None for o in other) + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0], int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): + minElements, optElements = other + optElements -= minElements + else: + raise TypeError( + "cannot multiply ParserElement and ({}) objects".format( + ",".join(type(item).__name__ for item in other) + ) + ) + else: + raise TypeError( + "cannot multiply ParserElement and {} objects".format( + type(other).__name__ + ) + ) + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError( + "second tuple value must be greater or equal to first tuple value" + ) + if minElements == optElements == 0: + return And([]) + + if optElements: + + def makeOptionalList(n): + if n > 1: + return Opt(self + makeOptionalList(n - 1)) + else: + return Opt(self) + + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self] * minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self] * minElements) + return ret + + def __rmul__(self, other): + return self.__mul__(other) + + def __or__(self, other): + """ + Implementation of ``|`` operator - returns :class:`MatchFirst` + """ + if other is Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return MatchFirst([self, other]) + + def __ror__(self, other): + """ + Implementation of ``|`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other | self + + def __xor__(self, other): + """ + Implementation of ``^`` operator - returns :class:`Or` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return Or([self, other]) + + def __rxor__(self, other): + """ + Implementation of ``^`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other ^ self + + def __and__(self, other): + """ + Implementation of ``&`` operator - returns :class:`Each` + """ + if isinstance(other, str_type): + other = 
self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return Each([self, other])
+
+ def __rand__(self, other):
+ """
+ Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return other & self
+
+ def __invert__(self):
+ """
+ Implementation of ``~`` operator - returns :class:`NotAny`
+ """
+ return NotAny(self)
+
+ # disable __iter__ to override legacy use of sequential access to __getitem__ to
+ # iterate over a sequence
+ __iter__ = None
+
+ def __getitem__(self, key):
+ """
+ Use ``[]`` indexing notation as a short form for expression repetition:
+
+ - ``expr[n]`` is equivalent to ``expr*n``
+ - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
+ - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
+ to ``expr*n + ZeroOrMore(expr)``
+ (read as "at least n instances of ``expr``")
+ - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
+ (read as "0 to n instances of ``expr``")
+ - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
+ - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
+
+ ``None`` may be used in place of ``...``.
+
+ Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
+ if more than ``n`` ``expr``s exist in the input stream. If this behavior is
+ desired, then write ``expr[..., n] + ~expr``.
+ """
+
+ # convert single arg keys to tuples
+ try:
+ if isinstance(key, str_type):
+ key = (key,)
+ iter(key)
+ except TypeError:
+ key = (key, key)
+
+ if len(key) > 2:
+ raise TypeError(
+ "only 1 or 2 index arguments supported ({}{})".format(
+ key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
+ )
+ )
+
+ # clip to 2 elements
+ ret = self * tuple(key[:2])
+ return ret
+
+ def __call__(self, name: str = None):
+ """
+ Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
+
+ If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
+ passed as ``True``.
+
+ If ``name`` is omitted, same as calling :class:`copy`.
+
+ Example::
+
+ # these are equivalent
+ userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
+ userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
+ """
+ if name is not None:
+ return self._setResultsName(name)
+ else:
+ return self.copy()
+
+ def suppress(self) -> "ParserElement":
+ """
+ Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
+ cluttering up returned output.
+ """
+ return Suppress(self)
+
+ def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
+ """
+ Enables the skipping of whitespace before matching the characters in the
+ :class:`ParserElement`'s defined pattern.
+
+ :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
+ """
+ self.skipWhitespace = True
+ return self
+
+ def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ :class:`ParserElement`'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+
+ :param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
+ """
+ self.skipWhitespace = False
+ return self
+
+ def set_whitespace_chars(
+ self, chars: Union[Set[str], str], copy_defaults: bool = False
+ ) -> "ParserElement":
+ """
+ Overrides the default whitespace chars
+ """
+ self.skipWhitespace = True
+ self.whiteChars = set(chars)
+ self.copyDefaultWhiteChars = copy_defaults
+ return self
+
+ def parse_with_tabs(self) -> "ParserElement":
+ """
+ Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.
+ Must be called before ``parse_string`` when the input grammar contains elements that
+ match ``<TAB>`` characters.
+ """
+ self.keepTabs = True
+ return self
+
+ def ignore(self, other: "ParserElement") -> "ParserElement":
+ """
+ Define expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example::
+
+ patt = OneOrMore(Word(alphas))
+ patt.parse_string('ablaj /* comment */ lskjd')
+ # -> ['ablaj']
+
+ patt.ignore(c_style_comment)
+ patt.parse_string('ablaj /* comment */ lskjd')
+ # -> ['ablaj', 'lskjd']
+ """
+ import typing
+
+ if isinstance(other, str_type):
+ other = Suppress(other)
+
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append(Suppress(other.copy()))
+ return self
+
+ def set_debug_actions(
+ self,
+ start_action: DebugStartAction,
+ success_action: DebugSuccessAction,
+ exception_action: DebugExceptionAction,
+ ) -> "ParserElement":
+ """
+ Customize display of debugging messages while doing pattern matching:
+
+ - ``start_action`` - method to be called when an expression is about to be parsed;
+ should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
+
+ - ``success_action`` - method to be called when an expression has successfully parsed;
+ should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
+
+ - ``exception_action`` - method to be called when expression fails to parse;
+ should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
+ """
+ self.debugActions = (
+ start_action or _default_start_debug_action,
+ success_action or _default_success_debug_action,
+ exception_action or _default_exception_debug_action,
+ )
+ self.debug = True
+ return self
+
+ def set_debug(self, flag=True) -> "ParserElement":
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set ``flag`` to ``True`` to enable, ``False`` to disable.
+
+ Example::
+
+ wd = Word(alphas).set_name("alphaword")
+ integer = Word(nums).set_name("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.set_debug()
+
+ OneOrMore(term).parse_string("abc 123 xyz 890")
+
+ prints::
+
+ Match alphaword at loc 0(1,1)
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 3(1,4)
+ Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+ Match alphaword at loc 7(1,8)
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 11(1,12)
+ Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+ Match alphaword at loc 15(1,16)
+ Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using :class:`set_debug_actions`. Prior to attempting
+ to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
+ is shown. If the parse succeeds, a ``"Matched"`` message is shown; if it fails, an ``"Exception raised"``
+ message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
+ which makes debugging and exception messages easier to understand - for instance, the default
+ name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
+ """
+ if flag:
+ self.set_debug_actions(
+ _default_start_debug_action,
+ _default_success_debug_action,
+ _default_exception_debug_action,
+ )
+ else:
+ self.debug = False
+ return self
+
+ @property
+ def default_name(self) -> str:
+ if self._defaultName is None:
+ self._defaultName = self._generateDefaultName()
+ return self._defaultName
+
+ @abstractmethod
+ def _generateDefaultName(self):
+ """
+ Child classes must define this method, which defines how the ``default_name`` is set.
+ """
+
+ def set_name(self, name: str) -> "ParserElement":
+ """
+ Define name for this expression, to make debugging and exception messages clearer.
+ Example::
+ Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
+ Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
+ """
+ self.customName = name
+ self.errmsg = "Expected " + self.name
+ if __diag__.enable_debug_on_named_expressions:
+ self.set_debug()
+ return self
+
+ @property
+ def name(self) -> str:
+ # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
+ return self.customName if self.customName is not None else self.default_name
+
+ def __str__(self) -> str:
+ return self.name
+
+ def __repr__(self) -> str:
+ return str(self)
+
+ def streamline(self) -> "ParserElement":
+ self.streamlined = True
+ self._defaultName = None
+ return self
+
+ def recurse(self):
+ return []
+
+ def _checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.recurse():
+ e._checkRecursion(subRecCheckList)
+
+ def validate(self, validateTrace=None):
+ """
+ Check defined expressions for valid structure, check for infinite recursive definitions.
+ """
+ self._checkRecursion([])
+
+ def parse_file(
+ self,
+ file_or_filename: Union[str, Path, TextIO],
+ encoding: str = "utf-8",
+ parse_all: bool = False,
+ *,
+ parseAll: bool = False,
+ ) -> ParseResults:
+ """
+ Execute the parse expression on the given file or filename.
+ If a filename is specified (instead of a file object),
+ the entire file is opened, read, and closed before parsing.
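+
+ Example (a sketch; assumes a file named ``digits.txt`` exists on disk)::
+
+ integer = Word(nums)
+ # read and parse the whole file, requiring it to match end-to-end
+ results = OneOrMore(integer).parse_file("digits.txt", parse_all=True)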
+ """ + parseAll = parseAll or parse_all + try: + file_contents = file_or_filename.read() + except AttributeError: + with open(file_or_filename, "r", encoding=encoding) as f: + file_contents = f.read() + try: + return self.parse_string(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def __eq__(self, other): + if self is other: + return True + elif isinstance(other, str_type): + return self.matches(other, parse_all=True) + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False + + def __hash__(self): + return id(self) + + def matches( + self, test_string: str, parse_all: bool = True, *, parseAll: bool = True + ) -> bool: + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. + + Parameters: + - ``test_string`` - to test against this expression for a match + - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests + + Example:: + + expr = Word(nums) + assert expr.matches("100") + """ + parseAll = parseAll and parse_all + try: + self.parse_string(str(test_string), parse_all=parseAll) + return True + except ParseBaseException: + return False + + def run_tests( + self, + tests: Union[str, List[str]], + parse_all: bool = True, + comment: OptionalType[Union["ParserElement", str]] = "#", + full_dump: bool = True, + print_results: bool = True, + failure_tests: bool = False, + post_parse: Callable[[str, ParseResults], str] = None, + file: OptionalType[TextIO] = None, + with_line_numbers: bool = False, + *, + parseAll: bool = True, + fullDump: bool = True, + printResults: bool = True, + failureTests: bool = False, + postParse: Callable[[str, ParseResults], str] = None, + ): + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. 
+
+ Parameters:
+ - ``tests`` - a list of separate test strings, or a multiline string of test strings
+ - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
+ - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
+ string; pass None to disable comment filtering
+ - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
+ if False, only dump nested list
+ - ``print_results`` - (default= ``True``) prints test output to stdout
+ - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
+ - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
+ `fn(test_string, parse_results)` and returns a string to be added to the test output
+ - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
+ if None, will default to ``sys.stdout``
+ - ``with_line_numbers`` - (default= ``False``) - show test strings with line and column numbers
+
+ Returns: a (success, results) tuple, where success indicates that all tests succeeded
+ (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
+ test's output
+
+ Example::
+
+ number_expr = pyparsing_common.number.copy()
+
+ result = number_expr.run_tests('''
+ # unsigned integer
+ 100
+ # negative integer
+ -100
+ # float with scientific notation
+ 6.02e23
+ # integer with scientific notation
+ 1e-12
+ ''')
+ print("Success" if result[0] else "Failed!")
+
+ result = number_expr.run_tests('''
+ # stray character
+ 100Z
+ # missing leading digit before '.'
+ -.100
+ # too many '.'
+ 3.14.159
+ ''', failure_tests=True)
+ print("Success" if result[0] else "Failed!")
+
+ prints::
+
+ # unsigned integer
+ 100
+ [100]
+
+ # negative integer
+ -100
+ [-100]
+
+ # float with scientific notation
+ 6.02e23
+ [6.02e+23]
+
+ # integer with scientific notation
+ 1e-12
+ [1e-12]
+
+ Success
+
+ # stray character
+ 100Z
+ ^
+ FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+ # missing leading digit before '.'
+ -.100
+ ^
+ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+ # too many '.'
+ 3.14.159
+ ^
+ FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+ Success
+
+ Each test string must be on a single line. If you want to test a string that spans multiple
+ lines, create a test like this::
+
+ expr.run_tests(r"this is a test\\n of strings that span \\n 3 lines")
+
+ (Note that this is a raw string literal; you must include the leading ``'r'``.)
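+
+ A ``post_parse`` callback can be used to annotate each successful test
+ (a sketch, reusing ``number_expr`` from the example above)::
+
+ def show_token_count(test_string, result):
+ return "token count: {}".format(len(result))
+
+ number_expr.run_tests('''
+ 100
+ 6.02e23
+ ''', post_parse=show_token_count)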
+ """ + from .testing import pyparsing_test + + parseAll = parseAll and parse_all + fullDump = fullDump and full_dump + printResults = printResults and print_results + failureTests = failureTests or failure_tests + postParse = postParse or post_parse + if isinstance(tests, str_type): + tests = list(map(type(tests).strip, tests.rstrip().splitlines())) + if isinstance(comment, str_type): + comment = Literal(comment) + if file is None: + file = sys.stdout + print_ = file.write + + result: Union[ParseResults, Exception] + allResults = [] + comments = [] + success = True + NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) + BOM = "\ufeff" + for t in tests: + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append(pyparsing_test.with_line_numbers(t)) + continue + if not t: + continue + out = [ + "\n" + "\n".join(comments) if comments else "", + pyparsing_test.with_line_numbers(t) if with_line_numbers else t, + ] + comments = [] + try: + # convert newline marks to actual newlines, and strip leading BOM if present + t = NL.transform_string(t.lstrip(BOM)) + result = self.parse_string(t, parse_all=parseAll) + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + out.append(pe.explain()) + out.append("FAIL: " + str(pe)) + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(pe.__traceback__)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(exc.__traceback__)) + success = success and failureTests + result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append( + "{} failed: {}: {}".format( + postParse.__name__, type(e).__name__, e + ) + ) + else: + out.append(result.dump(full=fullDump)) + out.append("") + + if printResults: + print_("\n".join(out)) + + allResults.append((t, result)) + + return success, allResults + + def create_diagram( + self, + output_html: Union[TextIO, Path, str], + vertical: int = 3, + show_results_names: bool = False, + **kwargs, + ) -> None: + """ + Create a railroad diagram for the parser. + + Parameters: + - output_html (str or file-like object) - output target for generated + diagram HTML + - vertical (int) - threshold for formatting multiple alternatives vertically + instead of horizontally (default=3) + - show_results_names - bool flag whether diagram should show annotations for + defined results names + + Additional diagram-formatting keyword arguments can also be included; + see railroad.Diagram class. 
+ """ + + try: + from .diagram import to_railroad, railroad_to_html + except ImportError as ie: + raise Exception( + "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" + ) from ie + + self.streamline() + + railroad = to_railroad( + self, + vertical=vertical, + show_results_names=show_results_names, + diagram_kwargs=kwargs, + ) + if isinstance(output_html, (str, Path)): + with open(output_html, "w", encoding="utf-8") as diag_file: + diag_file.write(railroad_to_html(railroad)) + else: + # we were passed a file-like object, just write to it + output_html.write(railroad_to_html(railroad)) + + setDefaultWhitespaceChars = set_default_whitespace_chars + inlineLiteralsUsing = inline_literals_using + setResultsName = set_results_name + setBreak = set_break + setParseAction = set_parse_action + addParseAction = add_parse_action + addCondition = add_condition + setFailAction = set_fail_action + tryParse = try_parse + canParseNext = can_parse_next + resetCache = reset_cache + enableLeftRecursion = enable_left_recursion + enablePackrat = enable_packrat + parseString = parse_string + scanString = scan_string + searchString = search_string + transformString = transform_string + setWhitespaceChars = set_whitespace_chars + parseWithTabs = parse_with_tabs + setDebugActions = set_debug_actions + setDebug = set_debug + defaultName = default_name + setName = set_name + parseFile = parse_file + runTests = run_tests + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class _PendingSkip(ParserElement): + # internal placeholder class to hold a place were '...' is added to a parser element, + # once another ParserElement is added, this placeholder will be replaced with a SkipTo + def __init__(self, expr: ParserElement, must_skip: bool = False): + super().__init__() + self.anchor = expr + self.must_skip = must_skip + + def _generateDefaultName(self): + return str(self.anchor + Empty()).replace("Empty", "...") + + def __add__(self, other): + skipper = SkipTo(other).set_name("...")("_skipped*") + if self.must_skip: + + def must_skip(t): + if not t._skipped or t._skipped.as_list() == [""]: + del t[0] + t.pop("_skipped", None) + + def show_skip(t): + if t._skipped.as_list()[-1:] == [""]: + t.pop("_skipped") + t["_skipped"] = "missing <" + repr(self.anchor) + ">" + + return ( + self.anchor + skipper().add_parse_action(must_skip) + | skipper().add_parse_action(show_skip) + ) + other + + return self.anchor + skipper + other + + def __repr__(self): + return self.defaultName + + def parseImpl(self, *args): + raise Exception( + "use of `...` expression without following SkipTo target expression" + ) + + +class Token(ParserElement): + """Abstract :class:`ParserElement` subclass, for defining atomic + matching patterns. + """ + + def __init__(self): + super().__init__(savelist=False) + + def _generateDefaultName(self): + return type(self).__name__ + + +class Empty(Token): + """ + An empty token, will always match. + """ + + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """ + A token that will never match. + """ + + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl(self, instring, loc, doActions=True): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """ + Token to exactly match a specified string. 
+ + Example:: + + Literal('blah').parse_string('blah') # -> ['blah'] + Literal('blah').parse_string('blahfooblah') # -> ['blah'] + Literal('blah').parse_string('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use :class:`CaselessLiteral`. + + For keyword matching (force word break before and after the matched string), + use :class:`Keyword` or :class:`CaselessKeyword`. + """ + + def __init__(self, match_string: str = "", *, matchString: str = ""): + super().__init__() + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + try: + self.firstMatchChar = match_string[0] + except IndexError: + raise ValueError("null string passed to Literal; use Empty() instead") + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + + # Performance tuning: modify __class__ to select + # a parseImpl optimized for single-character check + if self.matchLen == 1 and type(self) is Literal: + self.__class__ = _SingleCharLiteral + + def _generateDefaultName(self): + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar and instring.startswith( + self.match, loc + ): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +ParserElement._literalStringClass = Literal + + +class Keyword(Token): + """ + Token to exactly match a specified string as a keyword, that is, + it must be immediately followed by a non-keyword character. Compare + with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. + - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``identChars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. + + Example:: + + Keyword("start").parse_string("start") # -> ['start'] + Keyword("start").parse_string("starting") # -> Exception + + For case-insensitive matching, use :class:`CaselessKeyword`. 
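+
+ A custom identifier-character set can also be supplied (a sketch)::
+
+ # treat '-' as a keyword character, so 'if-else' will not match Keyword('if')
+ Keyword("if", ident_chars=alphanums + "_-").parse_string("if-else") # -> Exception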
+ """ + + DEFAULT_KEYWORD_CHARS = alphanums + "_$" + + def __init__( + self, + match_string: str = "", + ident_chars: OptionalType[str] = None, + caseless: bool = False, + *, + matchString: str = "", + identChars: OptionalType[str] = None, + ): + super().__init__() + identChars = identChars or ident_chars + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + try: + self.firstMatchChar = match_string[0] + except IndexError: + raise ValueError("null string passed to Keyword; use Empty() instead") + self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = match_string.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def _generateDefaultName(self): + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + errmsg = self.errmsg + errloc = loc + if self.caseless: + if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: + if loc == 0 or instring[loc - 1].upper() not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars + ): + return loc + self.matchLen, self.match + else: + # followed by keyword char + errmsg += ", was immediately followed by keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + else: + if ( + instring[loc] == self.firstMatchChar + and self.matchLen == 1 + or instring.startswith(self.match, loc) + ): + if loc == 0 or instring[loc - 1] not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars + ): + return loc + self.matchLen, self.match + else: + # followed by keyword char + errmsg += ( + ", keyword was immediately followed by keyword character" + ) + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + raise ParseException(instring, errloc, errmsg, self) + + @staticmethod + def set_default_keyword_chars(chars): + """ + Overrides the default characters used by :class:`Keyword` expressions. + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + + setDefaultKeywordChars = set_default_keyword_chars + + +class CaselessLiteral(Literal): + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + + OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for :class:`CaselessKeyword`.) + """ + + def __init__(self, match_string: str = "", *, matchString: str = ""): + match_string = matchString or match_string + super().__init__(match_string.upper()) + # Preserve the defining literal. 
+ self.returnString = match_string + self.errmsg = "Expected " + self.name + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc : loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + + +class CaselessKeyword(Keyword): + """ + Caseless version of :class:`Keyword`. + + Example:: + + OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD'] + + (Contrast with example for :class:`CaselessLiteral`.) + """ + + def __init__( + self, + match_string: str = "", + ident_chars: OptionalType[str] = None, + *, + matchString: str = "", + identChars: OptionalType[str] = None, + ): + identChars = identChars or ident_chars + match_string = matchString or match_string + super().__init__(match_string, identChars, caseless=True) + + +class CloseMatch(Token): + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. + :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters + - ``max_mismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. + + Example:: + + patt = CloseMatch("ATCATCGAATGGA") + patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) + patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + + def __init__( + self, + match_string: str, + max_mismatches: int = None, + *, + maxMismatches: int = 1, + caseless=False, + ): + maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches + super().__init__() + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected {!r} (with up to {} mismatches)".format( + self.match_string, self.maxMismatches + ) + self.caseless = caseless + self.mayIndexError = False + self.mayReturnEmpty = False + + def _generateDefaultName(self): + return "{}:{!r}".format(type(self).__name__, self.match_string) + + def parseImpl(self, instring, loc, doActions=True): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc, s_m in enumerate( + zip(instring[loc:maxloc], match_string) + ): + src, mat = s_m + if self.caseless: + src, mat = src.lower(), mat.lower() + + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = start + match_stringloc + 1 + 
results = ParseResults([instring[start:loc]]) + results["original"] = match_string + results["mismatches"] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """Token for matching words composed of allowed character sets. + Parameters: + - ``init_chars`` - string of all characters that should be used to + match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; + if ``body_chars`` is also specified, then this is the string of + initial characters + - ``body_chars`` - string of characters that + can be used for matching after a matched initial character as + given in ``init_chars``; if omitted, same as the initial characters + (default=``None``) + - ``min`` - minimum number of characters to match (default=1) + - ``max`` - maximum number of characters to match (default=0) + - ``exact`` - exact number of characters to match (default=0) + - ``as_keyword`` - match as a keyword (default=``False``) + - ``exclude_chars`` - characters that might be + found in the input ``body_chars`` string but which should not be + accepted for matching ;useful to define a word of all + printables except for one or two characters, for instance + (default=``None``) + + :class:`srange` is useful for defining custom character set strings + for defining :class:`Word` expressions, using range notation from + regular expression character sets. + + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. + This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. + + pyparsing includes helper strings for building Words: + + - :class:`alphas` + - :class:`nums` + - :class:`alphanums` + - :class:`hexnums` + - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :class:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - :class:`printables` (any non-whitespace character) + + ``alphas``, ``nums``, and ``printables`` are also defined in several + Unicode sets - see :class:`pyparsing_unicode``. 
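+
+    Where the given character sets and length constraints allow it,
+    :class:`Word` internally builds an equivalent compiled regular
+    expression to speed up matching (see the constructor code below).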
+ + Example:: + + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums + '-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, exclude_chars=",") + """ + + def __init__( + self, + init_chars: str = "", + body_chars: OptionalType[str] = None, + min: int = 1, + max: int = 0, + exact: int = 0, + as_keyword: bool = False, + exclude_chars: OptionalType[str] = None, + *, + initChars: OptionalType[str] = None, + bodyChars: OptionalType[str] = None, + asKeyword: bool = False, + excludeChars: OptionalType[str] = None, + ): + initChars = initChars or init_chars + bodyChars = bodyChars or body_chars + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__() + if not initChars: + raise ValueError( + "invalid {}, initChars cannot be empty string".format( + type(self).__name__ + ) + ) + + initChars = set(initChars) + self.initChars = initChars + if excludeChars: + excludeChars = set(excludeChars) + initChars -= excludeChars + if bodyChars: + bodyChars = set(bodyChars) - excludeChars + self.initCharsOrig = "".join(sorted(initChars)) + + if bodyChars: + self.bodyCharsOrig = "".join(sorted(bodyChars)) + self.bodyChars = set(bodyChars) + else: + self.bodyCharsOrig = "".join(sorted(initChars)) + self.bodyChars = set(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asKeyword = asKeyword + + # see if we can make a regex for this Word + if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): + if self.bodyChars == self.initChars: + if max == 0: + repeat = "+" + elif max == 1: + repeat = "" + else: + repeat = "{{{},{}}}".format( + self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen + ) + self.reString = "[{}]{}".format( + _collapse_string_to_ranges(self.initChars), + repeat, + ) + elif len(self.initChars) == 1: + if max == 0: + repeat = "*" + else: + repeat = "{{0,{}}}".format(max - 1) + self.reString = "{}[{}]{}".format( + re.escape(self.initCharsOrig), + _collapse_string_to_ranges(self.bodyChars), + repeat, + ) + else: + if max == 0: + repeat = "*" + elif max == 2: + repeat = "" + else: + repeat = "{{0,{}}}".format(max - 1) + self.reString = "[{}][{}]{}".format( + _collapse_string_to_ranges(self.initChars), + _collapse_string_to_ranges(self.bodyChars), + repeat, + ) + if self.asKeyword: + self.reString = r"\b" + self.reString + r"\b" + + try: + self.re = re.compile(self.reString) + except sre_constants.error: + self.re = None + else: + self.re_match = self.re.match + self.__class__ = _WordRegex + + def _generateDefaultName(self): + def charsAsStr(s): + max_repr_len = 16 + s = _collapse_string_to_ranges(s, re_escape=False) + if len(s) > max_repr_len: + return s[: max_repr_len - 3] + "..." 
+            else:
+                return s
+
+        if self.initChars != self.bodyChars:
+            base = "W:({}, {})".format(
+                charsAsStr(self.initChars), charsAsStr(self.bodyChars)
+            )
+        else:
+            base = "W:({})".format(charsAsStr(self.initChars))
+
+        # add length specification
+        if self.minLen > 1 or self.maxLen != _MAX_INT:
+            if self.minLen == self.maxLen:
+                if self.minLen == 1:
+                    return base[2:]
+                else:
+                    return base + "{{{}}}".format(self.minLen)
+            elif self.maxLen == _MAX_INT:
+                return base + "{{{},...}}".format(self.minLen)
+            else:
+                return base + "{{{},{}}}".format(self.minLen, self.maxLen)
+        return base
+
+    def parseImpl(self, instring, loc, doActions=True):
+        if instring[loc] not in self.initChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        instrlen = len(instring)
+        bodychars = self.bodyChars
+        maxloc = start + self.maxLen
+        maxloc = min(maxloc, instrlen)
+        while loc < maxloc and instring[loc] in bodychars:
+            loc += 1
+
+        throwException = False
+        if loc - start < self.minLen:
+            throwException = True
+        elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+            throwException = True
+        elif self.asKeyword:
+            if (
+                start > 0
+                and instring[start - 1] in bodychars
+                or loc < instrlen
+                and instring[loc] in bodychars
+            ):
+                throwException = True
+
+        if throwException:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+
+class _WordRegex(Word):
+    def parseImpl(self, instring, loc, doActions=True):
+        result = self.re_match(instring, loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        return loc, result.group()
+
+
+class Char(_WordRegex):
+    """A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
+    when defining a match of any single character in a string of
+    characters.
+    """
+
+    def __init__(
+        self,
+        charset: str,
+        as_keyword: bool = False,
+        exclude_chars: OptionalType[str] = None,
+        *,
+        asKeyword: bool = False,
+        excludeChars: OptionalType[str] = None,
+    ):
+        asKeyword = asKeyword or as_keyword
+        excludeChars = excludeChars or exclude_chars
+        super().__init__(
+            charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
+        )
+        self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
+        if asKeyword:
+            self.reString = r"\b{}\b".format(self.reString)
+        self.re = re.compile(self.reString)
+        self.re_match = self.re.match
+
+
+class Regex(Token):
+    r"""Token for matching strings that match a given regular
+    expression. Defined with string specifying the regular expression in
+    a form recognized by the stdlib Python
+    `re module <https://docs.python.org/3/library/re.html>`_.
+    If the given regex contains named groups (defined using ``(?P<name>...)``),
+    these will be preserved as named :class:`ParseResults`.
+
+    If instead of the Python stdlib ``re`` module you wish to use a different RE module
+    (such as the ``regex`` module), you can do so by building your ``Regex`` object with
+    a compiled RE that was compiled using ``regex``.
+
+    Example::
+
+        realnum = Regex(r"[+-]?\d+\.\d*")
+        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+
+        # named fields in a regex will be returned as named results
+        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+
+        # the Regex class will accept re's compiled using the regex module
+        import regex
+        parser = pp.Regex(regex.compile(r'[0-9]'))
+    """
+
+    def __init__(
+        self,
+        pattern: Any,
+        flags: Union[re.RegexFlag, int] = 0,
+        as_group_list: bool = False,
+        as_match: bool = False,
+        *,
+        asGroupList: bool = False,
+        asMatch: bool = False,
+    ):
+        """The parameters ``pattern`` and ``flags`` are passed
+        to the ``re.compile()`` function as-is. See the Python
+        `re module <https://docs.python.org/3/library/re.html>`_ module for an
+        explanation of the acceptable patterns and flags.
+        """
+        super().__init__()
+        asGroupList = asGroupList or as_group_list
+        asMatch = asMatch or as_match
+
+        if isinstance(pattern, str_type):
+            if not pattern:
+                raise ValueError("null string passed to Regex; use Empty() instead")
+
+            self.pattern = pattern
+            self.flags = flags
+
+            try:
+                self.re = re.compile(self.pattern, self.flags)
+                self.reString = self.pattern
+            except sre_constants.error:
+                raise ValueError(
+                    "invalid pattern ({!r}) passed to Regex".format(pattern)
+                )
+
+        elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
+            self.re = pattern
+            self.pattern = self.reString = pattern.pattern
+            self.flags = flags
+
+        else:
+            raise TypeError(
+                "Regex may only be constructed with a string or a compiled RE object"
+            )
+
+        self.re_match = self.re.match
+
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = self.re_match("") is not None
+        self.asGroupList = asGroupList
+        self.asMatch = asMatch
+        if self.asGroupList:
+            self.parseImpl = self.parseImplAsGroupList
+        if self.asMatch:
+            self.parseImpl = self.parseImplAsMatch
+
+    def _generateDefaultName(self):
+        return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
+
+    def parseImpl(self, instring, loc, doActions=True):
+        result = self.re_match(instring, loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = ParseResults(result.group())
+        d = result.groupdict()
+        if d:
+            for k, v in d.items():
+                ret[k] = v
+        return loc, ret
+
+    def parseImplAsGroupList(self, instring, loc, doActions=True):
+        result = self.re_match(instring, loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.groups()
+        return loc, ret
+
+    def parseImplAsMatch(self, instring, loc, doActions=True):
+        result = self.re_match(instring, loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result
+        return loc, ret
+
+    def sub(self, repl):
+        r"""
+        Return :class:`Regex` with an attached parse action to transform the parsed
+        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
+
+        Example::
+
+            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
+            print(make_html.transform_string("h1:main title:"))
+            # prints "
<h1>main title</h1>
" + """ + if self.asGroupList: + raise TypeError("cannot use sub() with Regex(asGroupList=True)") + + if self.asMatch and callable(repl): + raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") + + if self.asMatch: + + def pa(tokens): + return tokens[0].expand(repl) + + else: + + def pa(tokens): + return self.re.sub(repl, tokens[0]) + + return self.add_parse_action(pa) + + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + + - ``quote_char`` - string of one or more characters defining the + quote delimiting string + - ``esc_char`` - character to re_escape quotes, typically backslash + (default= ``None``) + - ``esc_quote`` - special quote sequence to re_escape an embedded quote + string (such as SQL's ``""`` to re_escape an embedded ``"``) + (default= ``None``) + - ``multiline`` - boolean indicating whether quotes can span + multiple lines (default= ``False``) + - ``unquote_results`` - boolean indicating whether the matched text + should be unquoted (default= ``True``) + - ``end_quote_char`` - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quote_char) + - ``convert_whitespace_escapes`` - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) to actual whitespace + (default= ``True``) + + Example:: + + qs = QuotedString('"') + print(qs.search_string('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', end_quote_char='}}') + print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', esc_quote='""') + print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + + prints:: + + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] + """ + ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) + + def __init__( + self, + quote_char: str = "", + esc_char: OptionalType[str] = None, + esc_quote: OptionalType[str] = None, + multiline: bool = False, + unquote_results: bool = True, + end_quote_char: OptionalType[str] = None, + convert_whitespace_escapes: bool = True, + *, + quoteChar: str = "", + escChar: OptionalType[str] = None, + escQuote: OptionalType[str] = None, + unquoteResults: bool = True, + endQuoteChar: OptionalType[str] = None, + convertWhitespaceEscapes: bool = True, + ): + super().__init__() + escChar = escChar or esc_char + escQuote = escQuote or esc_quote + unquoteResults = unquoteResults and unquote_results + endQuoteChar = endQuoteChar or end_quote_char + convertWhitespaceEscapes = ( + convertWhitespaceEscapes and convert_whitespace_escapes + ) + quote_char = quoteChar or quote_char + + # remove white space from quote chars - wont work anyway + quote_char = quote_char.strip() + if not quote_char: + raise ValueError("quote_char cannot be the empty string") + + if endQuoteChar is None: + endQuoteChar = quote_char + else: + endQuoteChar = endQuoteChar.strip() + if not endQuoteChar: + raise ValueError("endQuoteChar cannot be the empty string") + + self.quoteChar = quote_char + self.quoteCharLen = len(quote_char) + self.firstQuoteChar = quote_char[0] + self.endQuoteChar = endQuoteChar + self.endQuoteCharLen = len(endQuoteChar) + self.escChar = escChar + self.escQuote = escQuote + self.unquoteResults = unquoteResults + self.convertWhitespaceEscapes = convertWhitespaceEscapes + + sep = "" + inner_pattern = "" + + if escQuote: + inner_pattern += 
r"{}(?:{})".format(sep, re.escape(escQuote)) + sep = "|" + + if escChar: + inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) + sep = "|" + self.escCharReplacePattern = re.escape(self.escChar) + "(.)" + + if len(self.endQuoteChar) > 1: + inner_pattern += ( + "{}(?:".format(sep) + + "|".join( + "(?:{}(?!{}))".format( + re.escape(self.endQuoteChar[:i]), + _escape_regex_range_chars(self.endQuoteChar[i:]), + ) + for i in range(len(self.endQuoteChar) - 1, 0, -1) + ) + + ")" + ) + sep = "|" + + if multiline: + self.flags = re.MULTILINE | re.DOTALL + inner_pattern += r"{}(?:[^{}{}])".format( + sep, + _escape_regex_range_chars(self.endQuoteChar[0]), + (_escape_regex_range_chars(escChar) if escChar is not None else ""), + ) + else: + self.flags = 0 + inner_pattern += r"{}(?:[^{}\n\r{}])".format( + sep, + _escape_regex_range_chars(self.endQuoteChar[0]), + (_escape_regex_range_chars(escChar) if escChar is not None else ""), + ) + + self.pattern = "".join( + [ + re.escape(self.quoteChar), + "(?:", + inner_pattern, + ")*", + re.escape(self.endQuoteChar), + ] + ) + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + self.re_match = self.re.match + except sre_constants.error: + raise ValueError( + "invalid pattern {!r} passed to Regex".format(self.pattern) + ) + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + + def _generateDefaultName(self): + if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): + return "string enclosed in {!r}".format(self.quoteChar) + + return "quoted string, starting with {} ending with {}".format( + self.quoteChar, self.endQuoteChar + ) + + def parseImpl(self, instring, loc, doActions=True): + result = ( + instring[loc] == self.firstQuoteChar + and self.re_match(instring, loc) + or None + ) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.group() + + if self.unquoteResults: + + # strip off quotes + ret = ret[self.quoteCharLen : -self.endQuoteCharLen] + + if isinstance(ret, str_type): + # replace escaped whitespace + if "\\" in ret and self.convertWhitespaceEscapes: + for wslit, wschar in self.ws_map: + ret = ret.replace(wslit, wschar) + + # replace escaped characters + if self.escChar: + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) + + # replace escaped quotes + if self.escQuote: + ret = ret.replace(self.escQuote, self.endQuoteChar) + + return loc, ret + + +class CharsNotIn(Token): + """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. 
+ + Example:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) + + prints:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + + def __init__( + self, + not_chars: str = "", + min: int = 1, + max: int = 0, + exact: int = 0, + *, + notChars: str = "", + ): + super().__init__() + self.skipWhitespace = False + self.notChars = not_chars or notChars + self.notCharsSet = set(self.notChars) + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use " + "Opt(CharsNotIn()) if zero-length char group is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = self.minLen == 0 + self.mayIndexError = False + + def _generateDefaultName(self): + not_chars_str = _collapse_string_to_ranges(self.notChars) + if len(not_chars_str) > 16: + return "!W:({}...)".format(self.notChars[: 16 - 3]) + else: + return "!W:({})".format(self.notChars) + + def parseImpl(self, instring, loc, doActions=True): + notchars = self.notCharsSet + if instring[loc] in notchars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. 
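+
+    Example (an illustrative sketch; output shown assumes the default
+    whitespace-skipping behavior described above)::
+
+        # treat a run of exactly four leading spaces as a significant token
+        indent = White(" ", exact=4)
+        line = indent + Word(alphas)
+        print(line.parse_string("    abc"))  # -> ['    ', 'abc']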
+    """
+
+    whiteStrs = {
+        " ": "<SP>",
+        "\t": "<TAB>",
+        "\n": "<LF>",
+        "\r": "<CR>",
+        "\f": "<FF>",
+        "\u00A0": "<NBSP>",
+        "\u1680": "<OGHAM_SPACE_MARK>",
+        "\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
+        "\u2000": "<EN_QUAD>",
+        "\u2001": "<EM_QUAD>",
+        "\u2002": "<EN_SPACE>",
+        "\u2003": "<EM_SPACE>",
+        "\u2004": "<THREE-PER-EM_SPACE>",
+        "\u2005": "<FOUR-PER-EM_SPACE>",
+        "\u2006": "<SIX-PER-EM_SPACE>",
+        "\u2007": "<FIGURE_SPACE>",
+        "\u2008": "<PUNCTUATION_SPACE>",
+        "\u2009": "<THIN_SPACE>",
+        "\u200A": "<HAIR_SPACE>",
+        "\u200B": "<ZERO_WIDTH_SPACE>",
+        "\u202F": "<NARROW_NO-BREAK_SPACE>",
+        "\u205F": "<MEDIUM_MATHEMATICAL_SPACE>",
+        "\u3000": "<IDEOGRAPHIC_SPACE>",
+    }
+
+    def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
+        super().__init__()
+        self.matchWhite = ws
+        self.set_whitespace_chars(
+            "".join(c for c in self.whiteChars if c not in self.matchWhite),
+            copy_defaults=True,
+        )
+        # self.leave_whitespace()
+        self.mayReturnEmpty = True
+        self.errmsg = "Expected " + self.name
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+    def _generateDefaultName(self):
+        return "".join(White.whiteStrs[c] for c in self.matchWhite)
+
+    def parseImpl(self, instring, loc, doActions=True):
+        if instring[loc] not in self.matchWhite:
+            raise ParseException(instring, loc, self.errmsg, self)
+        start = loc
+        loc += 1
+        maxloc = start + self.maxLen
+        maxloc = min(maxloc, len(instring))
+        while loc < maxloc and instring[loc] in self.matchWhite:
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+
+class PositionToken(Token):
+    def __init__(self):
+        super().__init__()
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+
+class GoToColumn(PositionToken):
+    """Token to advance to a specific column of input text; useful for
+    tabular report scraping.
+    """
+
+    def __init__(self, colno: int):
+        super().__init__()
+        self.col = colno
+
+    def preParse(self, instring, loc):
+        if col(loc, instring) != self.col:
+            instrlen = len(instring)
+            if self.ignoreExprs:
+                loc = self._skipIgnorables(instring, loc)
+            while (
+                loc < instrlen
+                and instring[loc].isspace()
+                and col(loc, instring) != self.col
+            ):
+                loc += 1
+        return loc
+
+    def parseImpl(self, instring, loc, doActions=True):
+        thiscol = col(loc, instring)
+        if thiscol > self.col:
+            raise ParseException(instring, loc, "Text not in expected column", self)
+        newloc = loc + self.col - thiscol
+        ret = instring[loc:newloc]
+        return newloc, ret
+
+
+class LineStart(PositionToken):
+    r"""Matches if current position is at the beginning of a line within
+    the parse string
+
+    Example::
+
+        test = '''\
+        AAA this line
+        AAA and this line
+        AAA but not this one
+        B AAA and definitely not this one
+        '''
+
+        for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
+            print(t)
+
+    prints::
+
+        ['AAA', ' this line']
+        ['AAA', ' and this line']
+
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.leave_whitespace()
+        self.orig_whiteChars = set() | self.whiteChars
+        self.whiteChars.discard("\n")
+        self.skipper = Empty().set_whitespace_chars(self.whiteChars)
+        self.errmsg = "Expected start of line"
+
+    def preParse(self, instring, loc):
+        if loc == 0:
+            return loc
+        else:
+            ret = self.skipper.preParse(instring, loc)
+            if "\n" in self.orig_whiteChars:
+                while instring[ret : ret + 1] == "\n":
+                    ret = self.skipper.preParse(instring, ret + 1)
+            return ret
+
+    def parseImpl(self, instring, loc, doActions=True):
+        if col(loc, instring) == 1:
+            return loc, []
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class LineEnd(PositionToken):
+    """Matches if current position is at the end of a line within the
+    parse string
+    """
+
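+    # Illustrative sketch (editorial note, not upstream pyparsing code):
+    # LineEnd matches and returns a newline, or matches (empty) at the very
+    # end of the input, e.g.
+    #   (Word(alphas) + LineEnd()).parse_string("abc\n")  # -> ['abc', '\n']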
+ def __init__(self): + super().__init__() + self.whiteChars.discard("\n") + self.set_whitespace_chars(self.whiteChars, copy_defaults=False) + self.errmsg = "Expected end of line" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + if instring[loc] == "\n": + return loc + 1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class StringStart(PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected start of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + # see if entire string up to here is just whitespace and ignoreables + if loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class StringEnd(PositionToken): + """ + Matches if current position is at the end of the parse string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected end of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + elif loc > len(instring): + return loc, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class WordStart(PositionToken): + """Matches if the current position is at the beginning of a + :class:`Word`, and is not preceded by any character in a given + set of ``word_chars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars != printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + if ( + instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class WordEnd(PositionToken): + """Matches if the current position is at the end of a :class:`Word`, + and is not followed by any character in a given set of ``word_chars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars != printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True): + instrlen = len(instring) + if instrlen > 0 and loc < instrlen: + if ( + instring[loc] in self.wordChars + or instring[loc - 1] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. 
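+
+    Concrete combining subclasses defined below include :class:`And`,
+    :class:`Or`, :class:`MatchFirst`, and :class:`Each`.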
+ """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): + super().__init__(savelist) + self.exprs: List[ParserElement] + if isinstance(exprs, _generatorType): + exprs = list(exprs) + + if isinstance(exprs, str_type): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if any(isinstance(expr, str_type) for expr in exprs): + exprs = ( + self._literalStringClass(e) if isinstance(e, str_type) else e + for e in exprs + ) + self.exprs = list(exprs) + else: + try: + self.exprs = list(exprs) + except TypeError: + self.exprs = [exprs] + self.callPreparse = False + + def recurse(self): + return self.exprs[:] + + def append(self, other): + self.exprs.append(other) + self._defaultName = None + return self + + def leave_whitespace(self, recursive=True): + """ + Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().leave_whitespace(recursive) + + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive=True): + """ + Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().ignore_whitespace(recursive) + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.ignore_whitespace(recursive) + return self + + def ignore(self, other): + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + return self + + def _generateDefaultName(self): + return "{}:({})".format(self.__class__.__name__, str(self.exprs)) + + def streamline(self): + if self.streamlined: + return self + + super().streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) + if len(self.exprs) == 2: + other = self.exprs[0] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = other.exprs[:] + [self.exprs[1]] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = "Expected " + str(self) + + return self + + def validate(self, validateTrace=None): + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] + for e in self.exprs: + e.validate(tmp) + self._checkRecursion([]) + + def copy(self): + ret = super().copy() + ret.exprs = [e.copy() for e in self.exprs] + return ret + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and 
Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in self.exprs: + if ( + isinstance(e, ParserElement) + and e.resultsName + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "collides with {!r} on contained expression".format( + "warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class And(ParseExpression): + """ + Requires all given :class:`ParseExpression` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. + + Example:: + + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"), name_expr("name"), integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.leave_whitespace() + + def _generateDefaultName(self): + return "-" + + def __init__(self, exprs_arg: IterableType[ParserElement], savelist: bool = True): + exprs: List[ParserElement] = list(exprs_arg) + if exprs and Ellipsis in exprs: + tmp = [] + for i, expr in enumerate(exprs): + if expr is Ellipsis: + if i < len(exprs) - 1: + skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + else: + raise Exception( + "cannot construct And with sequence ending in ..." 
+ ) + else: + tmp.append(expr) + exprs[:] = tmp + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.set_whitespace_chars( + self.exprs[0].whiteChars, + copy_defaults=self.exprs[0].copyDefaultWhiteChars, + ) + self.skipWhitespace = self.exprs[0].skipWhitespace + else: + self.mayReturnEmpty = True + self.callPreparse = True + + def streamline(self) -> ParserElement: + # collapse any _PendingSkip's + if self.exprs: + if any( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1] + ): + for i, e in enumerate(self.exprs[:-1]): + if e is None: + continue + if ( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + ): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = None + self.exprs = [e for e in self.exprs if e is not None] + + super().streamline() + + # link any IndentedBlocks to the prior expression + for prev, cur in zip(self.exprs, self.exprs[1:]): + # traverse cur or any first embedded expr of cur looking for an IndentedBlock + # (but watch out for recursive grammar) + seen = set() + while cur: + if id(cur) in seen: + break + seen.add(id(cur)) + if isinstance(cur, IndentedBlock): + prev.add_parse_action( + lambda s, l, t: setattr(cur, "parent_anchor", col(l, s)) + ) + break + subs = cur.recurse() + cur = next(iter(subs), None) + + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + # pass False as callPreParse arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( + instring, loc, doActions, callPreParse=False + ) + errorStop = False + for e in self.exprs[1:]: + # if isinstance(e, And._ErrorStop): + if type(e) is And._ErrorStop: + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse(instring, loc, doActions) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException( + instring, len(instring), self.errmsg, self + ) + else: + loc, exprtokens = e._parse(instring, loc, doActions) + if exprtokens or exprtokens.haskeys(): + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # And([self, other]) + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e._checkRecursion(subRecCheckList) + if not e.mayReturnEmpty: + break + + def _generateDefaultName(self): + inner = " ".join(str(e) for e in self.exprs) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return "{" + inner + "}" + + +class Or(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.search_string("123 3.1416 789")) + + prints:: + + [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.saveAsList = False + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + matches = [] + fatals = [] + if all(e.callPreparse for e in self.exprs): + loc = self.preParse(instring, loc) + for e in self.exprs: + try: + loc2 = e.try_parse(instring, loc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + fatals.append(pfe) + maxException = None + maxExcLoc = -1 + except ParseException as err: + if not fatals: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might change whether or how much they match of the input. + matches.sort(key=itemgetter(0), reverse=True) + + if not doActions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, doActions) + + longest = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + + try: + loc2, toks = expr1._parse(instring, loc, doActions) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest + + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) + max_fatal = fatals[0] + raise max_fatal + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException( + instring, loc, "no defined alternatives to match", self + ) + + def __ixor__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # Or([self, other]) + + def _generateDefaultName(self): + return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in 
e.suppress_warnings_ + for e in self.exprs + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "will return a list of all parsed tokens in an And alternative, " + "in prior versions only the first token was returned; enclose" + "contained argument in Group".format( + "warn_multiple_tokens_in_named_alternation", + name, + type(self).__name__, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + more than one expression matches, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example:: + + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums) + print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + if self.exprs: + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.saveAsList = False + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + + for e in self.exprs: + try: + return e._parse( + instring, + loc, + doActions, + ) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + raise + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException( + instring, loc, "no defined alternatives to match", self + ) + + def __ior__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # MatchFirst([self, other]) + + def _generateDefaultName(self): + return "{" + " | ".join(str(e) for e in self.exprs) + "}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "will return a list of all parsed tokens in an And alternative, " + "in prior versions only the first token was returned; enclose" + "contained argument in Group".format( + "warn_multiple_tokens_in_named_alternation", + name, + type(self).__name__, + ), + stacklevel=3, + ) + + return 
super()._setResultsName(name, listAllMatches) + + +class Each(ParseExpression): + """Requires all given :class:`ParseExpression` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. + + Example:: + + color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) + + shape_spec.run_tests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints:: + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = True): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + if self.initExprGroups: + self.opt1map = dict( + (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) + ) + opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] + opt2 = [ + e + for e in self.exprs + if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) + ] + self.optionals = opt1 + opt2 + self.multioptionals = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, _MultipleMatch) + ] + self.multirequired = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, OneOrMore) + ] + self.required = [ + e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) + ] + self.required += self.multirequired + self.initExprGroups = False + + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + multis = self.multioptionals[:] + matchOrder = [] + + keepMatching = True + failed = [] + fatals = [] + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + multis + failed.clear() + fatals.clear() + for e in tmpExprs: + try: + tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + fatals.append(pfe) + 
failed.append(e) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e), e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + # look for any ParseFatalExceptions + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) + max_fatal = fatals[0] + raise max_fatal + + if tmpReqd: + missing = ", ".join(str(e) for e in tmpReqd) + raise ParseException( + instring, + loc, + "Missing one or more required elements ({})".format(missing), + ) + + # add any unmatched Opts, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] + + total_results = ParseResults([]) + for e in matchOrder: + loc, results = e._parse(instring, loc, doActions) + total_results += results + + return loc, total_results + + def _generateDefaultName(self): + return "{" + " & ".join(str(e) for e in self.exprs) + "}" + + +class ParseElementEnhance(ParserElement): + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): + super().__init__(savelist) + if isinstance(expr, str_type): + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr) + elif issubclass(type(self), self._literalStringClass): + expr = Literal(expr) + else: + expr = self._literalStringClass(Literal(expr)) + self.expr = expr + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.set_whitespace_chars( + expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars + ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def recurse(self): + return [self.expr] if self.expr is not None else [] + + def parseImpl(self, instring, loc, doActions=True): + if self.expr is not None: + return self.expr._parse(instring, loc, doActions, callPreParse=False) + else: + raise ParseException("", loc, self.errmsg, self) + + def leave_whitespace(self, recursive=True): + super().leave_whitespace(recursive) + + if recursive: + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive=True): + super().ignore_whitespace(recursive) + + if recursive: + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.ignore_whitespace(recursive) + return self + + def ignore(self, other): + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + return self + + def streamline(self): + super().streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def _checkRecursion(self, parseElementList): + if self in parseElementList: + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] + if self.expr is not None: + self.expr._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] + 
if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self): + return "{}:({})".format(self.__class__.__name__, str(self.expr)) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class IndentedBlock(ParseElementEnhance): + """ + Expression to match one or more expressions at a given indentation level. + Useful for parsing text where structure is implied by indentation (like Python source code). + """ + + class _Indent(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = "expected indent at column {}".format(ref_col) + self.add_condition(lambda s, l, t: col(l, s) == ref_col) + + class _IndentGreater(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = "expected indent at column greater than {}".format(ref_col) + self.add_condition(lambda s, l, t: col(l, s) > ref_col) + + def __init__( + self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True + ): + super().__init__(expr, savelist=True) + # if recursive: + # raise NotImplementedError("IndentedBlock with recursive is not implemented") + self._recursive = recursive + self._grouped = grouped + self.parent_anchor = 1 + + def parseImpl(self, instring, loc, doActions=True): + # advance parse position to non-whitespace by using an Empty() + # this should be the column to be used for all subsequent indented lines + anchor_loc = Empty().preParse(instring, loc) + + # see if self.expr matches at the current location - if not it will raise an exception + # and no further work is necessary + self.expr.try_parse(instring, anchor_loc, doActions) + + indent_col = col(anchor_loc, instring) + peer_detect_expr = self._Indent(indent_col) + + inner_expr = Empty() + peer_detect_expr + self.expr + if self._recursive: + sub_indent = self._IndentGreater(indent_col) + nested_block = IndentedBlock( + self.expr, recursive=self._recursive, grouped=self._grouped + ) + nested_block.set_debug(self.debug) + nested_block.parent_anchor = indent_col + inner_expr += Opt(sub_indent + nested_block) + + inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") + block = OneOrMore(inner_expr) + + trailing_undent = self._Indent(self.parent_anchor) | StringEnd() + + if self._grouped: + wrapper = Group + else: + wrapper = lambda expr: expr + return (wrapper(block) + Optional(trailing_undent)).parseImpl( + instring, anchor_loc, doActions + ) + + +class AtStringStart(ParseElementEnhance): + """Matches if expression matches at the beginning of the parse + string:: + + AtStringStart(Word(nums)).parse_string("123") + # prints ["123"] + + AtStringStart(Word(nums)).parse_string(" 123") + # raises ParseException + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + raise ParseException(instring, loc, "not found at string start") + return super().parseImpl(instring, loc, doActions) + + +class AtLineStart(ParseElementEnhance): + r"""Matches if an expression matches at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (AtLineStart('AAA') + restOfLine).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + 
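+        # callPreparse is disabled below so that no whitespace-skipping
+        # pre-parse runs before parseImpl tests the match location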
self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) != 1: + raise ParseException(instring, loc, "not found at line start") + return super().parseImpl(instring, loc, doActions) + + +class FollowedBy(ParseElementEnhance): + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. + + Example:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + OneOrMore(attr_expr).parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() + + prints:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression + _, ret = self.expr._parse(instring, loc, doActions=doActions) + del ret[:] + + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. ``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - expr - expression that must match prior to the current parse + location + - retreat - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, :class:`Literal`, + :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` + with a specified exact or maximum length, then the retreat + parameter is not required. Otherwise, retreat must be specified to + give a maximum number of characters to look back from + the current parse position for a lookbehind match. 
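+
+    When only a maximum ``retreat`` is given, the lookbehind is evaluated by
+    re-parsing successively longer slices of text ending at the current parse
+    position, stopping at the first slice that matches (see ``parseImpl``
+    below).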
+
+    Example::
+
+        # VB-style variable names with type prefixes
+        int_var = PrecededBy("#") + pyparsing_common.identifier
+        str_var = PrecededBy("$") + pyparsing_common.identifier
+
+    """
+
+    def __init__(
+        self, expr: Union[ParserElement, str], retreat: OptionalType[int] = None
+    ):
+        super().__init__(expr)
+        self.expr = self.expr().leave_whitespace()
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.exact = False
+        if isinstance(expr, str_type):
+            retreat = len(expr)
+            self.exact = True
+        elif isinstance(expr, (Literal, Keyword)):
+            retreat = expr.matchLen
+            self.exact = True
+        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
+            retreat = expr.maxLen
+            self.exact = True
+        elif isinstance(expr, PositionToken):
+            retreat = 0
+            self.exact = True
+        self.retreat = retreat
+        self.errmsg = "not preceded by " + str(expr)
+        self.skipWhitespace = False
+        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
+
+    def parseImpl(self, instring, loc=0, doActions=True):
+        if self.exact:
+            if loc < self.retreat:
+                raise ParseException(instring, loc, self.errmsg)
+            start = loc - self.retreat
+            _, ret = self.expr._parse(instring, start)
+        else:
+            # retreat specified a maximum lookbehind window, iterate
+            test_expr = self.expr + StringEnd()
+            instring_slice = instring[max(0, loc - self.retreat) : loc]
+            last_expr = ParseException(instring, loc, self.errmsg)
+            for offset in range(1, min(loc, self.retreat + 1) + 1):
+                try:
+                    # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
+                    _, ret = test_expr._parse(
+                        instring_slice, len(instring_slice) - offset
+                    )
+                except ParseBaseException as pbe:
+                    last_expr = pbe
+                else:
+                    break
+            else:
+                raise last_expr
+        return loc, ret
+
+
+class Located(ParseElementEnhance):
+    """
+    Decorates a returned token with its starting and ending
+    locations in the input string.
+
+    This helper adds the following results names:
+
+    - ``locn_start`` - location where matched expression begins
+    - ``locn_end`` - location where matched expression ends
+    - ``value`` - the actual parsed results
+
+    Be careful if the input text contains ``<TAB>`` characters, you
+    may want to call :class:`ParserElement.parse_with_tabs`
+
+    Example::
+
+        wd = Word(alphas)
+        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+
+    prints::
+
+        [0, ['ljsdf'], 5]
+        [8, ['lksdjjf'], 15]
+        [18, ['lkkjj'], 23]
+
+    """
+
+    def parseImpl(self, instring, loc, doActions=True):
+        start = loc
+        loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
+        ret_tokens = ParseResults([start, tokens, loc])
+        ret_tokens["locn_start"] = start
+        ret_tokens["value"] = tokens
+        ret_tokens["locn_end"] = loc
+        if self.resultsName:
+            # must return as a list, so that the name will be attached to the complete group
+            return loc, [ret_tokens]
+        else:
+            return loc, ret_tokens
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.
+    ``NotAny`` does *not* advance the parsing position within the
+    input string, it only verifies that the specified parse expression
+    does *not* match at the current position. Also, ``NotAny`` does
+    *not* skip over leading whitespace. ``NotAny`` always returns
+    a null token list. May be constructed using the ``'~'`` operator.
+ + Example:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Opt(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infix_notation + boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + # do NOT use self.leave_whitespace(), don't want to propagate to exprs + # self.leave_whitespace() + self.skipWhitespace = False + + self.mayReturnEmpty = True + self.errmsg = "Found unwanted token, " + str(self.expr) + + def parseImpl(self, instring, loc, doActions=True): + if self.expr.can_parse_next(instring, loc): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def _generateDefaultName(self): + return "~{" + str(self.expr) + "}" + + +class _MultipleMatch(ParseElementEnhance): + def __init__( + self, + expr: ParserElement, + stop_on: OptionalType[Union[ParserElement, str]] = None, + *, + stopOn: OptionalType[Union[ParserElement, str]] = None, + ): + super().__init__(expr) + stopOn = stopOn or stop_on + self.saveAsList = True + ender = stopOn + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender): + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + return self + + def parseImpl(self, instring, loc, doActions=True): + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse(instring, loc, doActions) + try: + hasIgnoreExprs = not not self.ignoreExprs + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables(instring, loc) + else: + preloc = loc + loc, tmptokens = self_expr_parse(instring, preloc, doActions) + if tmptokens or tmptokens.haskeys(): + tokens += tmptokens + except (ParseException, IndexError): + pass + + return loc, tokens + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in [self.expr] + self.expr.recurse(): + if ( + isinstance(e, ParserElement) + and e.resultsName + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "collides with {!r} on contained expression".format( + "warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. 
+ + Parameters: + - expr - expression that must match one or more times + - stop_on - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + OneOrMore(attr_expr).parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stop_on attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parse_string(text).pprint() + """ + + def _generateDefaultName(self): + return "{" + str(self.expr) + "}..." + + +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + - ``expr`` - expression that must match zero or more times + - ``stop_on`` - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) - (default= ``None``) + + Example: similar to :class:`OneOrMore` + """ + + def __init__( + self, + expr: ParserElement, + stop_on: OptionalType[Union[ParserElement, str]] = None, + *, + stopOn: OptionalType[Union[ParserElement, str]] = None, + ): + super().__init__(expr, stopOn=stopOn or stop_on) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + try: + return super().parseImpl(instring, loc, doActions) + except (ParseException, IndexError): + return loc, ParseResults([], name=self.resultsName) + + def _generateDefaultName(self): + return "[" + str(self.expr) + "]..." + + +class _NullToken: + def __bool__(self): + return False + + def __str__(self): + return "" + + +class Opt(ParseElementEnhance): + """ + Optional matching of the given expression. + + Parameters: + - ``expr`` - expression that must match zero or more times + - ``default`` (optional) - value to be returned if the optional expression is not found. 
+ + Example:: + + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) + zip.run_tests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + + prints:: + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + + __optionalNotMatched = _NullToken() + + def __init__( + self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched + ): + super().__init__(expr, savelist=False) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + self_expr = self.expr + try: + loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) + except (ParseException, IndexError): + default_value = self.defaultValue + if default_value is not self.__optionalNotMatched: + if self_expr.resultsName: + tokens = ParseResults([default_value]) + tokens[self_expr.resultsName] = default_value + else: + tokens = [default_value] + else: + tokens = [] + return loc, tokens + + def _generateDefaultName(self): + inner = str(self.expr) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return "[" + inner + "]" + + +Optional = Opt + + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched + expression is found. + + Parameters: + - ``expr`` - target expression marking the end of the data to be skipped + - ``include`` - if ``True``, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element + list) (default= ``False``). 
+    - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
+      comments) that might contain false matches to the target expression
+    - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
+      included in the skipped text; if found before the target expression is found,
+      the :class:`SkipTo` is not a match
+
+    Example::
+
+        report = '''
+            Outstanding Issues Report - 1 Jan 2000
+
+               # | Severity | Description                               |  Days Open
+            -----+----------+-------------------------------------------+-----------
+             101 | Critical | Intermittent system crash                 |          6
+              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
+              79 | Minor    | System slow when running too many reports |         47
+            '''
+        integer = Word(nums)
+        SEP = Suppress('|')
+        # use SkipTo to simply match everything up until the next SEP
+        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+        # - parse action will call token.strip() for each matched token, i.e., the description body
+        string_data = SkipTo(SEP, ignore=quoted_string)
+        string_data.set_parse_action(token_map(str.strip))
+        ticket_expr = (integer("issue_num") + SEP
+                       + string_data("sev") + SEP
+                       + string_data("desc") + SEP
+                       + integer("days_open"))
+
+        for tkt in ticket_expr.search_string(report):
+            print(tkt.dump())
+
+    prints::
+
+        ['101', 'Critical', 'Intermittent system crash', '6']
+        - days_open: 6
+        - desc: Intermittent system crash
+        - issue_num: 101
+        - sev: Critical
+        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+        - days_open: 14
+        - desc: Spelling error on Login ('log|n')
+        - issue_num: 94
+        - sev: Cosmetic
+        ['79', 'Minor', 'System slow when running too many reports', '47']
+        - days_open: 47
+        - desc: System slow when running too many reports
+        - issue_num: 79
+        - sev: Minor
+    """
+
+    def __init__(
+        self,
+        other: Union[ParserElement, str],
+        include: bool = False,
+        ignore: bool = None,
+        fail_on: OptionalType[Union[ParserElement, str]] = None,
+        *,
+        failOn: Union[ParserElement, str] = None,
+    ):
+        super().__init__(other)
+        failOn = failOn or fail_on
+        self.ignoreExpr = ignore
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.includeMatch = include
+        self.saveAsList = False
+        if isinstance(failOn, str_type):
+            self.failOn = self._literalStringClass(failOn)
+        else:
+            self.failOn = failOn
+        self.errmsg = "No match found for " + str(self.expr)
+
+    def parseImpl(self, instring, loc, doActions=True):
+        startloc = loc
+        instrlen = len(instring)
+        self_expr_parse = self.expr._parse
+        self_failOn_canParseNext = (
+            self.failOn.canParseNext if self.failOn is not None else None
+        )
+        self_ignoreExpr_tryParse = (
+            self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+        )
+
+        tmploc = loc
+        while tmploc <= instrlen:
+            if self_failOn_canParseNext is not None:
+                # break if failOn expression matches
+                if self_failOn_canParseNext(instring, tmploc):
+                    break
+
+            if self_ignoreExpr_tryParse is not None:
+                # advance past ignore expressions
+                while 1:
+                    try:
+                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+                    except ParseBaseException:
+                        break
+
+            try:
+                self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+            except (ParseException, IndexError):
+                # no match, advance loc in string
+                tmploc += 1
+            else:
+                # matched skipto expr, done
+                break
+
+        else:
+            # ran off the end of the input string without matching skipto expr, fail
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        # build up return values
+        loc = tmploc
+        skiptext = 
instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) + skipresult += mat + + return loc, skipresult + + +class Forward(ParseElementEnhance): + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the ``Forward`` + variable using the ``'<<'`` operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. + + Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: + + fwd_expr << a | b | c + + will actually be evaluated as:: + + (fwd_expr << a) | b | c + + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the ``Forward``:: + + fwd_expr << (a | b | c) + + Converting to use the ``'<<='`` operator instead will avoid this problem. + + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. + """ + + def __init__(self, other: OptionalType[Union[ParserElement, str]] = None): + self.caller_frame = traceback.extract_stack(limit=2)[0] + super().__init__(other, savelist=False) + self.lshift_line = None + + def __lshift__(self, other): + if hasattr(self, "caller_frame"): + del self.caller_frame + if isinstance(other, str_type): + other = self._literalStringClass(other) + self.expr = other + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.set_whitespace_chars( + self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars + ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + self.lshift_line = traceback.extract_stack(limit=2)[-2] + return self + + def __ilshift__(self, other): + return self << other + + def __or__(self, other): + caller_line = traceback.extract_stack(limit=2)[-2] + if ( + __diag__.warn_on_match_first_with_lshift_operator + and caller_line == self.lshift_line + and Diagnostics.warn_on_match_first_with_lshift_operator + not in self.suppress_warnings_ + ): + warnings.warn( + "using '<<' operator with '|' is probably an error, use '<<='", + stacklevel=2, + ) + ret = super().__or__(other) + return ret + + def __del__(self): + # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' + if ( + self.expr is None + and __diag__.warn_on_assignment_to_Forward + and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ + ): + warnings.warn_explicit( + "Forward defined here but no expression attached later using '<<=' or '<<'", + UserWarning, + filename=self.caller_frame.filename, + lineno=self.caller_frame.lineno, + ) + + def parseImpl(self, instring, loc, doActions=True): + if ( + self.expr is None + and __diag__.warn_on_parse_using_empty_Forward + and Diagnostics.warn_on_parse_using_empty_Forward + not in self.suppress_warnings_ + ): + # walk stack until parse_string, scan_string, search_string, or transform_string is found + parse_fns = [ + "parse_string", + "scan_string", + "search_string", + "transform_string", + ] + tb = traceback.extract_stack(limit=200) + for i, frm in enumerate(reversed(tb), start=1): + if frm.name in parse_fns: + stacklevel = i + 1 + break + else: + stacklevel = 2 + warnings.warn( + "Forward expression was never assigned a value, will not parse any input", + 
stacklevel=stacklevel, + ) + if not ParserElement._left_recursion_enabled: + return super().parseImpl(instring, loc, doActions) + # ## Bounded Recursion algorithm ## + # Recursion only needs to be processed at ``Forward`` elements, since they are + # the only ones that can actually refer to themselves. The general idea is + # to handle recursion stepwise: We start at no recursion, then recurse once, + # recurse twice, ..., until more recursion offers no benefit (we hit the bound). + # + # The "trick" here is that each ``Forward`` gets evaluated in two contexts + # - to *match* a specific recursion level, and + # - to *search* the bounded recursion level + # and the two run concurrently. The *search* must *match* each recursion level + # to find the best possible match. This is handled by a memo table, which + # provides the previous match to the next level match attempt. + # + # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. + # + # There is a complication since we not only *parse* but also *transform* via + # actions: We do not want to run the actions too often while expanding. Thus, + # we expand using `doActions=False` and only run `doActions=True` if the next + # recursion level is acceptable. + with ParserElement.recursion_lock: + memo = ParserElement.recursion_memos + try: + # we are parsing at a specific recursion expansion - use it as-is + prev_loc, prev_result = memo[loc, self, doActions] + if isinstance(prev_result, Exception): + raise prev_result + return prev_loc, prev_result.copy() + except KeyError: + act_key = (loc, self, True) + peek_key = (loc, self, False) + # we are searching for the best recursion expansion - keep on improving + # both `doActions` cases must be tracked separately here! + prev_loc, prev_peek = memo[peek_key] = ( + loc - 1, + ParseException( + instring, loc, "Forward recursion without base case", self + ), + ) + if doActions: + memo[act_key] = memo[peek_key] + while True: + try: + new_loc, new_peek = super().parseImpl(instring, loc, False) + except ParseException: + # we failed before getting any match – do not hide the error + if isinstance(prev_peek, Exception): + raise + new_loc, new_peek = prev_loc, prev_peek + # the match did not get better: we are done + if new_loc <= prev_loc: + if doActions: + # replace the match for doActions=False as well, + # in case the action did backtrack + prev_loc, prev_result = memo[peek_key] = memo[act_key] + del memo[peek_key], memo[act_key] + return prev_loc, prev_result.copy() + del memo[peek_key] + return prev_loc, prev_peek.copy() + # the match did get better: see if we can improve further + else: + if doActions: + try: + memo[act_key] = super().parseImpl(instring, loc, True) + except ParseException as e: + memo[peek_key] = memo[act_key] = (new_loc, e) + raise + prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek + + def leave_whitespace(self, recursive=True): + self.skipWhitespace = False + return self + + def ignore_whitespace(self, recursive=True): + self.skipWhitespace = True + return self + + def streamline(self): + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + + if self not in validateTrace: + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self): + # Avoid infinite recursion by setting a temporary _defaultName + 
self._defaultName = ": ..." + + # Use the string representation of main expression. + retString = "..." + try: + if self.expr is not None: + retString = str(self.expr)[:1000] + else: + retString = "None" + finally: + return self.__class__.__name__ + ": " + retString + + def copy(self): + if self.expr is not None: + return super().copy() + else: + ret = Forward() + ret <<= self + return ret + + def _setResultsName(self, name, list_all_matches=False): + if ( + __diag__.warn_name_set_on_empty_Forward + and Diagnostics.warn_name_set_on_empty_Forward + not in self.suppress_warnings_ + ): + if self.expr is None: + warnings.warn( + "{}: setting results name {!r} on {} expression " + "that has no contained expression".format( + "warn_name_set_on_empty_Forward", name, type(self).__name__ + ), + stacklevel=3, + ) + + return super()._setResultsName(name, list_all_matches) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseExpression`, for converting parsed results. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist=False): + super().__init__(expr) # , savelist) + self.saveAsList = False + + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. + + Example:: + + real = Word(nums) + '.' + Word(nums) + print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] + + real = Combine(Word(nums) + '.' + Word(nums)) + print(real.parse_string('3.1416')) # -> ['3.1416'] + # no match when there are internal spaces + print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) + """ + + def __init__( + self, + expr: ParserElement, + join_string: str = "", + adjacent: bool = True, + *, + joinString: OptionalType[str] = None, + ): + super().__init__(expr) + joinString = joinString if joinString is not None else join_string + # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself + if adjacent: + self.leave_whitespace() + self.adjacent = adjacent + self.skipWhitespace = True + self.joinString = joinString + self.callPreparse = True + + def ignore(self, other): + if self.adjacent: + ParserElement.ignore(self, other) + else: + super().ignore(other) + return self + + def postParse(self, instring, loc, tokenlist): + retToks = tokenlist.copy() + del retToks[:] + retToks += ParseResults( + ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults + ) + + if self.resultsName and retToks.haskeys(): + return [retToks] + else: + return retToks + + +class Group(TokenConverter): + """Converter to return the matched tokens as a list - useful for + returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. + + The optional ``aslist`` argument when set to True will return the + parsed tokens as a Python list instead of a pyparsing ParseResults. 
+ + Example:: + + ident = Word(alphas) + num = Word(nums) + term = ident | num + func = ident + Opt(delimited_list(term)) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', 'a', 'b', '100'] + + func = ident + Group(Opt(delimited_list(term))) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', ['a', 'b', '100']] + """ + + def __init__(self, expr: ParserElement, aslist: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonList = aslist + + def postParse(self, instring, loc, tokenlist): + if self._asPythonList: + return ParseResults.List( + tokenlist.asList() + if isinstance(tokenlist, ParseResults) + else list(tokenlist) + ) + else: + return [tokenlist] + + +class Dict(TokenConverter): + """Converter to return a repetitive expression as a list, but also + as a dictionary. Each element can also be referenced using the first + token in the expression as its key. Useful for tabular report + scraping when the first column can be used as a item key. + + The optional ``asdict`` argument when set to True will return the + parsed tokens as a Python dict instead of a pyparsing ParseResults. + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + # print attributes as plain groups + print(OneOrMore(attr_expr).parse_string(text).dump()) + + # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names + result = Dict(OneOrMore(Group(attr_expr))).parse_string(text) + print(result.dump()) + + # access named fields as dict entries, or output as dict + print(result['shape']) + print(result.as_dict()) + + prints:: + + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} + + See more examples at :class:`ParseResults` of accessing fields by results name. + """ + + def __init__(self, expr: ParserElement, asdict: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonDict = asdict + + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): + if len(tok) == 0: + continue + + ikey = tok[0] + if isinstance(ikey, int): + ikey = str(ikey).strip() + + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) + + else: + try: + dictvalue = tok.copy() # ParseResults(i) + except Exception: + exc = TypeError( + "could not extract dict values from parsed results" + " - Dict expression must contain Grouped expressions" + ) + raise exc from None + + del dictvalue[0] + + if len(dictvalue) != 1 or ( + isinstance(dictvalue, ParseResults) and dictvalue.haskeys() + ): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) + + if self._asPythonDict: + return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() + else: + return [tokenlist] if self.resultsName else tokenlist + + +class Suppress(TokenConverter): + """Converter for ignoring the results of a parsed expression. 
+
+    Example::
+
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + ZeroOrMore(',' + wd)
+        print(wd_list1.parse_string(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+        print(wd_list2.parse_string(source))
+
+        # Skipped text (using '...') can be suppressed as well
+        source = "lead in START relevant text END trailing text"
+        start_marker = Keyword("START")
+        end_marker = Keyword("END")
+        find_body = Suppress(...) + start_marker + ... + end_marker
+        print(find_body.parse_string(source))
+
+    prints::
+
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+        ['START', 'relevant text ', 'END']
+
+    (See also :class:`delimited_list`.)
+    """
+
+    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
+        if expr is ...:
+            expr = _PendingSkip(NoMatch())
+        super().__init__(expr)
+
+    def __add__(self, other):
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) + other
+        else:
+            return super().__add__(other)
+
+    def __sub__(self, other):
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) - other
+        else:
+            return super().__sub__(other)
+
+    def postParse(self, instring, loc, tokenlist):
+        return []
+
+    def suppress(self):
+        return self
+
+
+def trace_parse_action(f: ParseAction):
+    """Decorator for debugging parse actions.
+
+    When the parse action is called, this decorator will print
+    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
+    When the parse action completes, the decorator will print
+    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+
+        wd = Word(alphas)
+
+        @trace_parse_action
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = OneOrMore(wd).set_parse_action(remove_duplicate_chars)
+        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
+
+    prints::
+
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s, l, t = paArgs[-3:]
+        if len(paArgs) > 3:
+            thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
+        sys.stderr.write(
+            ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
+        )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
+            raise
+        sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
+        return ret
+
+    return z
+
+
+# convenience constants for positional expressions
+empty = Empty().set_name("empty")
+line_start = LineStart().set_name("line_start")
+line_end = LineEnd().set_name("line_end")
+string_start = StringStart().set_name("string_start")
+string_end = StringEnd().set_name("string_end")
+
+_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
+    lambda s, l, t: t[0][1]
+)
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
+    lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
+)
+_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
+    lambda s, l, t: chr(int(t[0][1:], 8))
+)
+_singleChar = (
+    _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
+)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = (
+    Literal("[")
+    + Opt("^").set_results_name("negate")
+    + Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
+    + "]"
+)
+
+
+def srange(s):
+    r"""Helper to easily define string ranges for use in :class:`Word`
+    construction. Borrows syntax from regexp ``'[]'`` string range
+    definitions::
+
+        srange("[0-9]")   -> "0123456789"
+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+
+    The input string must be enclosed in []'s, and the returned string
+    is the expanded character set joined into a single string. The
+    values enclosed in the []'s may be:
+
+    - a single character
+    - an escaped character with a leading backslash (such as ``\-``
+      or ``\]``)
+    - an escaped hex character with a leading ``'\x'``
+      (``\x21``, which is a ``'!'`` character) (``\0x##``
+      is also supported for backwards compatibility)
+    - an escaped octal character with a leading ``'\0'``
+      (``\041``, which is a ``'!'`` character)
+    - a range of any of the above, separated by a dash (``'a-z'``,
+      etc.)
+    - any combination of the above (``'aeiouy'``,
+      ``'a-zA-Z0-9_$'``, etc.)
+ """ + _expanded = ( + lambda p: p + if not isinstance(p, ParseResults) + else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) + ) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) + except Exception: + return "" + + +def token_map(func, *args): + """Helper to define a parse action by mapping a function to all + elements of a :class:`ParseResults` list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, + which will convert the parsed data to an integer using base 16. + + Example (compare the last to example in :class:`ParserElement.transform_string`:: + + hex_ints = OneOrMore(Word(hexnums)).set_parse_action(token_map(int, 16)) + hex_ints.run_tests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).set_parse_action(token_map(str.upper)) + OneOrMore(upperword).run_tests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).set_parse_action(token_map(str.title)) + OneOrMore(wd).set_parse_action(' '.join).run_tests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + + def pa(s, l, t): + return [func(tokn, *args) for tokn in t] + + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + pa.__name__ = func_name + + return pa + + +def autoname_elements(): + """ + Utility to simplify mass-naming of parser elements, for + generating railroad diagram with named subdiagrams. 
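+
+    A minimal usage sketch (``term`` and ``expr`` are illustrative variable
+    names, not part of this utility)::
+
+        term = Word(alphas)
+        expr = term + Opt(',' + term)
+        autoname_elements()   # 'term' and 'expr' now use their variable names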
+ """ + for name, var in sys._getframe().f_back.f_locals.items(): + if isinstance(var, ParserElement) and not var.customName: + var.set_name(name) + + +dbl_quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' +).set_name("string enclosed in double quotes") + +sgl_quoted_string = Combine( + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("string enclosed in single quotes") + +quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' + | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("quotedString using single or double quotes") + +unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") + + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] + +# backward compatibility names +tokenMap = token_map +conditionAsParseAction = condition_as_parse_action +nullDebugAction = null_debug_action +sglQuotedString = sgl_quoted_string +dblQuotedString = dbl_quoted_string +quotedString = quoted_string +unicodeString = unicode_string +lineStart = line_start +lineEnd = line_end +stringStart = string_start +stringEnd = string_end +traceParseAction = trace_parse_action diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py new file mode 100644 index 000000000..4f7c41e44 --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py @@ -0,0 +1,593 @@ +import railroad +import pyparsing +from pkg_resources import resource_filename +from typing import ( + List, + Optional, + NamedTuple, + Generic, + TypeVar, + Dict, + Callable, + Set, + Iterable, +) +from jinja2 import Template +from io import StringIO +import inspect + +with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp: + template = Template(fp.read()) + +# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet +NamedDiagram = NamedTuple( + "NamedDiagram", + [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)], +) +""" +A simple structure for associating a name with a railroad diagram +""" + +T = TypeVar("T") + + +class EachItem(railroad.Group): + """ + Custom railroad item to compose a: + - Group containing a + - OneOrMore containing a + - Choice of the elements in the Each + with the group label indicating that all must be matched + """ + + all_label = "[ALL]" + + def __init__(self, *items): + choice_item = railroad.Choice(len(items) - 1, *items) + one_or_more_item = railroad.OneOrMore(item=choice_item) + super().__init__(one_or_more_item, label=self.all_label) + + +class AnnotatedItem(railroad.Group): + """ + Simple subclass of Group that creates an annotation label + """ + + def __init__(self, label: str, item): + super().__init__(item=item, label="[{}]".format(label)) + + +class EditablePartial(Generic[T]): + """ + Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been + constructed. 
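+
+    A small illustrative sketch (``make_pair`` is a stand-in function, not
+    part of this module)::
+
+        def make_pair(a, b):
+            return (a, b)
+
+        part = EditablePartial.from_call(make_pair, 1, b=2)
+        part.kwargs["b"] = 3   # edit the stored kwargs before construction
+        part()                 # -> (1, 3)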
+ """ + + # We need this here because the railroad constructors actually transform the data, so can't be called until the + # entire tree is assembled + + def __init__(self, func: Callable[..., T], args: list, kwargs: dict): + self.func = func + self.args = args + self.kwargs = kwargs + + @classmethod + def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": + """ + If you call this function in the same way that you would call the constructor, it will store the arguments + as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) + """ + return EditablePartial(func=func, args=list(args), kwargs=kwargs) + + @property + def name(self): + return self.kwargs["name"] + + def __call__(self) -> T: + """ + Evaluate the partial and return the result + """ + args = self.args.copy() + kwargs = self.kwargs.copy() + + # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. + # args=['list', 'of', 'things']) + arg_spec = inspect.getfullargspec(self.func) + if arg_spec.varargs in self.kwargs: + args += kwargs.pop(arg_spec.varargs) + + return self.func(*args, **kwargs) + + +def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: + """ + Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams + :params kwargs: kwargs to be passed in to the template + """ + data = [] + for diagram in diagrams: + io = StringIO() + diagram.diagram.writeSvg(io.write) + title = diagram.name + if diagram.index == 0: + title += " (root)" + data.append({"title": title, "text": "", "svg": io.getvalue()}) + + return template.render(diagrams=data, **kwargs) + + +def resolve_partial(partial: "EditablePartial[T]") -> T: + """ + Recursively resolves a collection of Partials into whatever type they are + """ + if isinstance(partial, EditablePartial): + partial.args = resolve_partial(partial.args) + partial.kwargs = resolve_partial(partial.kwargs) + return partial() + elif isinstance(partial, list): + return [resolve_partial(x) for x in partial] + elif isinstance(partial, dict): + return {key: resolve_partial(x) for key, x in partial.items()} + else: + return partial + + +def to_railroad( + element: pyparsing.ParserElement, + diagram_kwargs: Optional[dict] = None, + vertical: int = 3, + show_results_names: bool = False, +) -> List[NamedDiagram]: + """ + Convert a pyparsing element tree into a list of diagrams. 
This is the recommended entrypoint to diagram + creation if you want to access the Railroad tree before it is converted to HTML + :param element: base element of the parser being diagrammed + :param diagram_kwargs: kwargs to pass to the Diagram() constructor + :param vertical: (optional) - int - limit at which number of alternatives should be + shown vertically instead of horizontally + :param show_results_names - bool to indicate whether results name annotations should be + included in the diagram + """ + # Convert the whole tree underneath the root + lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) + _to_diagram_element( + element, + lookup=lookup, + parent=None, + vertical=vertical, + show_results_names=show_results_names, + ) + + root_id = id(element) + # Convert the root if it hasn't been already + if root_id in lookup: + if not element.customName: + lookup[root_id].name = "" + lookup[root_id].mark_for_extraction(root_id, lookup, force=True) + + # Now that we're finished, we can convert from intermediate structures into Railroad elements + diags = list(lookup.diagrams.values()) + if len(diags) > 1: + # collapse out duplicate diags with the same name + seen = set() + deduped_diags = [] + for d in diags: + # don't extract SkipTo elements, they are uninformative as subdiagrams + if d.name == "...": + continue + if d.name is not None and d.name not in seen: + seen.add(d.name) + deduped_diags.append(d) + resolved = [resolve_partial(partial) for partial in deduped_diags] + else: + # special case - if just one diagram, always display it, even if + # it has no name + resolved = [resolve_partial(partial) for partial in diags] + return sorted(resolved, key=lambda diag: diag.index) + + +def _should_vertical( + specification: int, exprs: Iterable[pyparsing.ParserElement] +) -> bool: + """ + Returns true if we should return a vertical list of elements + """ + if specification is None: + return False + else: + return len(_visible_exprs(exprs)) >= specification + + +class ElementState: + """ + State recorded for an individual pyparsing Element + """ + + # Note: this should be a dataclass, but we have to support Python 3.5 + def __init__( + self, + element: pyparsing.ParserElement, + converted: EditablePartial, + parent: EditablePartial, + number: int, + name: str = None, + parent_index: Optional[int] = None, + ): + #: The pyparsing element that this represents + self.element: pyparsing.ParserElement = element + #: The name of the element + self.name: str = name + #: The output Railroad element in an unconverted state + self.converted: EditablePartial = converted + #: The parent Railroad element, which we store so that we can extract this if it's duplicated + self.parent: EditablePartial = parent + #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram + self.number: int = number + #: The index of this inside its parent + self.parent_index: Optional[int] = parent_index + #: If true, we should extract this out into a subdiagram + self.extract: bool = False + #: If true, all of this element's children have been filled out + self.complete: bool = False + + def mark_for_extraction( + self, el_id: int, state: "ConverterState", name: str = None, force: bool = False + ): + """ + Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram + :param el_id: id of the element + :param state: element/diagram state tracker + :param name: name to use for this element's text + :param force: If true, force 
extraction now, regardless of the state of this. Only useful for extracting the + root element when we know we're finished + """ + self.extract = True + + # Set the name + if not self.name: + if name: + # Allow forcing a custom name + self.name = name + elif self.element.customName: + self.name = self.element.customName + else: + self.name = "" + + # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children + # to be added + # Also, if this is just a string literal etc, don't bother extracting it + if force or (self.complete and _worth_extracting(self.element)): + state.extract_into_diagram(el_id) + + +class ConverterState: + """ + Stores some state that persists between recursions into the element tree + """ + + def __init__(self, diagram_kwargs: Optional[dict] = None): + #: A dictionary mapping ParserElements to state relating to them + self._element_diagram_states: Dict[int, ElementState] = {} + #: A dictionary mapping ParserElement IDs to subdiagrams generated from them + self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} + #: The index of the next unnamed element + self.unnamed_index: int = 1 + #: The index of the next element. This is used for sorting + self.index: int = 0 + #: Shared kwargs that are used to customize the construction of diagrams + self.diagram_kwargs: dict = diagram_kwargs or {} + self.extracted_diagram_names: Set[str] = set() + + def __setitem__(self, key: int, value: ElementState): + self._element_diagram_states[key] = value + + def __getitem__(self, key: int) -> ElementState: + return self._element_diagram_states[key] + + def __delitem__(self, key: int): + del self._element_diagram_states[key] + + def __contains__(self, key: int): + return key in self._element_diagram_states + + def generate_unnamed(self) -> int: + """ + Generate a number used in the name of an otherwise unnamed diagram + """ + self.unnamed_index += 1 + return self.unnamed_index + + def generate_index(self) -> int: + """ + Generate a number used to index a diagram + """ + self.index += 1 + return self.index + + def extract_into_diagram(self, el_id: int): + """ + Used when we encounter the same token twice in the same tree. When this + happens, we replace all instances of that token with a terminal, and + create a new subdiagram for the token + """ + position = self[el_id] + + # Replace the original definition of this element with a regular block + if position.parent: + ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) + if "item" in position.parent.kwargs: + position.parent.kwargs["item"] = ret + elif "items" in position.parent.kwargs: + position.parent.kwargs["items"][position.parent_index] = ret + + # If the element we're extracting is a group, skip to its content but keep the title + if position.converted.func == railroad.Group: + content = position.converted.kwargs["item"] + else: + content = position.converted + + self.diagrams[el_id] = EditablePartial.from_call( + NamedDiagram, + name=position.name, + diagram=EditablePartial.from_call( + railroad.Diagram, content, **self.diagram_kwargs + ), + index=position.number, + ) + + del self[el_id] + + +def _worth_extracting(element: pyparsing.ParserElement) -> bool: + """ + Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children
+    themselves have children, then it's complex enough to extract
+    """
+    children = element.recurse()
+    return any(child.recurse() for child in children)
+
+
+def _apply_diagram_item_enhancements(fn):
+    """
+    decorator to ensure enhancements to a diagram item (such as results name annotations)
+    get applied on return from _to_diagram_element (we do this since there are several
+    returns in _to_diagram_element)
+    """
+
+    def _inner(
+        element: pyparsing.ParserElement,
+        parent: Optional[EditablePartial],
+        lookup: ConverterState = None,
+        vertical: int = None,
+        index: int = 0,
+        name_hint: str = None,
+        show_results_names: bool = False,
+    ) -> Optional[EditablePartial]:
+
+        ret = fn(
+            element,
+            parent,
+            lookup,
+            vertical,
+            index,
+            name_hint,
+            show_results_names,
+        )
+
+        # apply annotation for results name, if present
+        if show_results_names and ret is not None:
+            element_results_name = element.resultsName
+            if element_results_name:
+                # add "*" to indicate if this is a "list all results" name
+                element_results_name += "" if element.modalResults else "*"
+                ret = EditablePartial.from_call(
+                    railroad.Group, item=ret, label=element_results_name
+                )
+
+        return ret
+
+    return _inner
+
+
+def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
+    non_diagramming_exprs = (
+        pyparsing.ParseElementEnhance,
+        pyparsing.PositionToken,
+        pyparsing.And._ErrorStop,
+    )
+    return [
+        e
+        for e in exprs
+        if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
+    ]
+
+
+@_apply_diagram_item_enhancements
+def _to_diagram_element(
+    element: pyparsing.ParserElement,
+    parent: Optional[EditablePartial],
+    lookup: ConverterState = None,
+    vertical: int = None,
+    index: int = 0,
+    name_hint: str = None,
+    show_results_names: bool = False,
+) -> Optional[EditablePartial]:
+    """
+    Recursively converts a PyParsing Element to a railroad Element
+    :param lookup: The shared converter state that keeps track of useful things
+    :param index: The index of this element within the parent
+    :param parent: The parent of this element in the output tree
+    :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
+    it sets the threshold of the number of items before we go vertical.
If True, always go vertical, if False, never + do so + :param name_hint: If provided, this will override the generated name + :param show_results_names: bool flag indicating whether to add annotations for results names + :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed + """ + exprs = element.recurse() + name = name_hint or element.customName or element.__class__.__name__ + + # Python's id() is used to provide a unique identifier for elements + el_id = id(element) + + element_results_name = element.resultsName + + # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram + if not element.customName: + if isinstance( + element, + ( + pyparsing.TokenConverter, + # pyparsing.Forward, + pyparsing.Located, + ), + ): + # However, if this element has a useful custom name, and its child does not, we can pass it on to the child + if exprs: + if not exprs[0].customName: + propagated_name = name + else: + propagated_name = None + + return _to_diagram_element( + element.expr, + parent=parent, + lookup=lookup, + vertical=vertical, + index=index, + name_hint=propagated_name, + show_results_names=show_results_names, + ) + + # If the element isn't worth extracting, we always treat it as the first time we say it + if _worth_extracting(element): + if el_id in lookup: + # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, + # so we have to extract it into a new diagram. + looked_up = lookup[el_id] + looked_up.mark_for_extraction(el_id, lookup, name=name_hint) + ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) + return ret + + elif el_id in lookup.diagrams: + # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we + # just put in a marker element that refers to the sub-diagram + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + return ret + + # Recursively convert child elements + # Here we find the most relevant Railroad element for matching pyparsing Element + # We use ``items=[]`` here to hold the place for where the child elements will go once created + if isinstance(element, pyparsing.And): + # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat + # (all will have the same name, and resultsName) + if not exprs: + return None + if len(set((e.name, e.resultsName) for e in exprs)) == 1: + ret = EditablePartial.from_call( + railroad.OneOrMore, item="", repeat=str(len(exprs)) + ) + elif _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Stack, items=[]) + else: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): + if not exprs: + return None + if _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) + else: + ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) + elif isinstance(element, pyparsing.Each): + if not exprs: + return None + ret = EditablePartial.from_call(EachItem, items=[]) + elif isinstance(element, pyparsing.NotAny): + ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") + elif isinstance(element, pyparsing.FollowedBy): + ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="") + elif isinstance(element, pyparsing.PrecededBy): + ret = EditablePartial.from_call(AnnotatedItem, 
label="LOOKBEHIND", item="") + elif isinstance(element, pyparsing.Opt): + ret = EditablePartial.from_call(railroad.Optional, item="") + elif isinstance(element, pyparsing.OneOrMore): + ret = EditablePartial.from_call(railroad.OneOrMore, item="") + elif isinstance(element, pyparsing.ZeroOrMore): + ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") + elif isinstance(element, pyparsing.Group): + ret = EditablePartial.from_call( + railroad.Group, item=None, label=element_results_name + ) + elif isinstance(element, pyparsing.Empty) and not element.customName: + # Skip unnamed "Empty" elements + ret = None + elif len(exprs) > 1: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif len(exprs) > 0 and not element_results_name: + ret = EditablePartial.from_call(railroad.Group, item="", label=name) + else: + terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) + ret = terminal + + if ret is None: + return + + # Indicate this element's position in the tree so we can extract it if necessary + lookup[el_id] = ElementState( + element=element, + converted=ret, + parent=parent, + parent_index=index, + number=lookup.generate_index(), + ) + if element.customName: + lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) + + i = 0 + for expr in exprs: + # Add a placeholder index in case we have to extract the child before we even add it to the parent + if "items" in ret.kwargs: + ret.kwargs["items"].insert(i, None) + + item = _to_diagram_element( + expr, + parent=ret, + lookup=lookup, + vertical=vertical, + index=i, + show_results_names=show_results_names, + ) + + # Some elements don't need to be shown in the diagram + if item is not None: + if "item" in ret.kwargs: + ret.kwargs["item"] = item + elif "items" in ret.kwargs: + # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal + ret.kwargs["items"][i] = item + i += 1 + elif "items" in ret.kwargs: + # If we're supposed to skip this element, remove it from the parent + del ret.kwargs["items"][i] + + # If all this items children are none, skip this item + if ret and ( + ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) + or ("item" in ret.kwargs and ret.kwargs["item"] is None) + ): + ret = EditablePartial.from_call(railroad.Terminal, name) + + # Mark this element as "complete", ie it has all of its children + if el_id in lookup: + lookup[el_id].complete = True + + if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: + lookup.extract_into_diagram(el_id) + if ret is not None: + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + + return ret diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 new file mode 100644 index 000000000..d2219fb01 --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 @@ -0,0 +1,26 @@ + + + + {% if not head %} + + {% else %} + {{ hear | safe }} + {% endif %} + + +{{ body | safe }} +{% for diagram in diagrams %} +
+    <div class="railroad-group">
+        <h1 class="railroad-heading">
+            {{ diagram.title }}
+        </h1>
+        <div>
+            {{ diagram.text }}
+        </div>
+        <div class="railroad-svg">
+            {{ diagram.svg }}
+        </div>
+    </div>
+{% endfor %} + + diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py new file mode 100644 index 000000000..e06513eb0 --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py @@ -0,0 +1,267 @@ +# exceptions.py + +import re +import sys +from typing import Optional + +from .util import col, line, lineno, _collapse_string_to_ranges +from .unicode import pyparsing_unicode as ppu + + +class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): + pass + + +_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) +_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") + + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, + pstr: str, + loc: int = 0, + msg: Optional[str] = None, + elem=None, + ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parser_element = self.parserElement = elem + self.args = (pstr, loc, msg) + + @staticmethod + def explain_exception(exc, depth=16): + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + """ + import inspect + from .core import ParserElement + + if depth is None: + depth = sys.getrecursionlimit() + ret = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(" " * (exc.column - 1) + "^") + ret.append("{}: {}".format(type(exc).__name__, exc)) + + if depth > 0: + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for i, ff in enumerate(callers[-depth:]): + frm = ff[0] + + f_self = frm.f_locals.get("self", None) + if isinstance(f_self, ParserElement): + if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): + continue + if id(f_self) in seen: + continue + seen.add(id(f_self)) + + self_type = type(f_self) + ret.append( + "{}.{} - {}".format( + self_type.__module__, self_type.__name__, f_self + ) + ) + + elif f_self is not None: + self_type = type(f_self) + ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) + + else: + code = frm.f_code + if code.co_name in ("wrapper", ""): + continue + + ret.append("{}".format(code.co_name)) + + depth -= 1 + if not depth: + break + + return "\n".join(ret) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) + + @property + def line(self) -> str: + """ + Return the line of text where the exception occurred. 
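+
+    Example (a minimal sketch; the failing input here is illustrative)::
+
+        try:
+            Word(nums).parse_string("abc")
+        except ParseException as pe:
+            pe.line   # -> 'abc'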
+ """ + return line(self.loc, self.pstr) + + @property + def lineno(self) -> int: + """ + Return the 1-based line number of text where the exception occurred. + """ + return lineno(self.loc, self.pstr) + + @property + def col(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + @property + def column(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + def __str__(self) -> str: + if self.pstr: + if self.loc >= len(self.pstr): + foundstr = ", found end of text" + else: + # pull out next word at error location + found_match = _exception_word_extractor.match(self.pstr, self.loc) + if found_match is not None: + found = found_match.group(0) + else: + found = self.pstr[self.loc : self.loc + 1] + foundstr = (", found %r" % found).replace(r"\\", "\\") + else: + foundstr = "" + return "{}{} (at char {}), (line:{}, col:{})".format( + self.msg, foundstr, self.loc, self.lineno, self.column + ) + + def __repr__(self): + return str(self) + + def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: + """ + Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + markerString = marker_string if marker_string is not None else markerString + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join( + (line_str[:line_column], markerString, line_str[line_column:]) + ) + return line_str.strip() + + def explain(self, depth=16) -> str: + """ + Method to translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Example:: + + expr = pp.Word(pp.nums) * 3 + try: + expr.parse_string("123 456 A789") + except pp.ParseException as pe: + print(pe.explain(depth=0)) + + prints:: + + 123 456 A789 + ^ + ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `set_name` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + Note: pyparsing's default truncation of exception tracebacks may also truncate the + stack of expressions that are displayed in the ``explain`` output. 
To get the full listing + of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` + """ + return self.explain_exception(self, depth) + + markInputline = mark_input_line + + +class ParseException(ParseBaseException): + """ + Exception thrown when a parse expression doesn't match the input string + + Example:: + + try: + Word(nums).set_name("integer").parse_string("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.column)) + + prints:: + + Expected integer (at char 0), (line:1, col:1) + column: 1 + + """ + + +class ParseFatalException(ParseBaseException): + """ + User-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately + """ + + +class ParseSyntaxException(ParseFatalException): + """ + Just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + + +class RecursiveGrammarException(Exception): + """ + Exception thrown by :class:`ParserElement.validate` if the + grammar could be left-recursive; parser may need to enable + left recursion using :class:`ParserElement.enable_left_recursion` + """ + + def __init__(self, parseElementList): + self.parseElementTrace = parseElementList + + def __str__(self) -> str: + return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py new file mode 100644 index 000000000..7d6119712 --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py @@ -0,0 +1,1059 @@ +# helpers.py +import html.entities +import re + +from . import __diag__ +from .core import * +from .util import _bslash, _flatten, _escape_regex_range_chars + + +# +# global helpers +# +def delimited_list( + expr: Union[str, ParserElement], + delim: Union[str, ParserElement] = ",", + combine: bool = False, + *, + allow_trailing_delim: bool = False, +) -> ParserElement: + """Helper to define a delimited list of expressions - the delimiter + defaults to ','. By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. + + If ``allow_trailing_delim`` is set to True, then the list may end with + a delimiter. 
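+
+    For instance (a minimal sketch), a trailing delimiter parses only when
+    the flag is set::
+
+        delimited_list(Word(alphas), allow_trailing_delim=True).parse_string("aa,bb,cc,")  # -> ['aa', 'bb', 'cc']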
+ + Example:: + + delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + if isinstance(expr, str_type): + expr = ParserElement._literalStringClass(expr) + + dlName = "{expr} [{delim} {expr}]...{end}".format( + expr=str(expr.streamline()), + delim=str(delim), + end=" [{}]".format(str(delim)) if allow_trailing_delim else "", + ) + + if not combine: + delim = Suppress(delim) + + delimited_list_expr = expr + ZeroOrMore(delim + expr) + + if allow_trailing_delim: + delimited_list_expr += Opt(delim) + + if combine: + return Combine(delimited_list_expr).set_name(dlName) + else: + return delimited_list_expr.set_name(dlName) + + +def counted_array( + expr: ParserElement, + int_expr: OptionalType[ParserElement] = None, + *, + intExpr: OptionalType[ParserElement] = None, +) -> ParserElement: + """Helper to define a counted list of expressions. + + This helper defines a pattern of the form:: + + integer expr expr expr... + + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``int_expr`` is specified, it should be a pyparsing expression + that produces an integer value. + + Example:: + + counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) + counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] + + # if other fields must be parsed after the count but before the + # list items, give the fields results names and they will + # be preserved in the returned ParseResults: + count_with_metadata = integer + Word(alphas)("type") + typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") + result = typed_array.parse_string("3 bool True True False") + print(result.dump()) + + # prints + # ['True', 'True', 'False'] + # - items: ['True', 'True', 'False'] + # - type: 'bool' + """ + intExpr = intExpr or int_expr + array_expr = Forward() + + def count_field_parse_action(s, l, t): + nonlocal array_expr + n = t[0] + array_expr <<= (expr * n) if n else Empty() + # clear list contents, but keep any named results + del t[:] + + if intExpr is None: + intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.set_name("arrayLen") + intExpr.add_parse_action(count_field_parse_action, call_during_try=True) + return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") + + +def match_previous_literal(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_literal(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. If this is not desired, use + :class:`match_previous_expr`. Do *not* use with packrat parsing + enabled. 
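+
+    As a minimal runnable sketch of the behavior described above::
+
+        first = Word(nums)
+        match_expr = first + ":" + match_previous_literal(first)
+        match_expr.parse_string("1:1")    # -> ['1', ':', '1']
+        match_expr.parse_string("1:2")    # raises ParseException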
+ """ + rep = Forward() + + def copy_token_to_repeater(s, l, t): + if t: + if len(t) == 1: + rep << t[0] + else: + # flatten t tokens + tflat = _flatten(t.as_list()) + rep << And(Literal(tt) for tt in tflat) + else: + rep << Empty() + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def match_previous_expr(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_expr(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. + """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + + def copy_token_to_repeater(s, l, t): + matchTokens = _flatten(t.as_list()) + + def must_match_these_tokens(s, l, t): + theseTokens = _flatten(t.as_list()) + if theseTokens != matchTokens: + raise ParseException("", 0, "") + + rep.set_parse_action(must_match_these_tokens, callDuringTry=True) + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def one_of( + strs: Union[IterableType[str], str], + caseless: bool = False, + use_regex: bool = True, + as_keyword: bool = False, + *, + useRegex: bool = True, + asKeyword: bool = False, +) -> ParserElement: + """Helper to quickly define a set of alternative :class:`Literal` s, + and makes sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. 
+ + Parameters: + + - ``strs`` - a string of space-delimited literals, or a collection of + string literals + - ``caseless`` - treat all literals as caseless - (default= ``False``) + - ``use_regex`` - as an optimization, will + generate a :class:`Regex` object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if + creating a :class:`Regex` raises an exception) - (default= ``True``) + - ``as_keyword`` - enforce :class:`Keyword`-style matching on the + generated expressions - (default= ``False``) + - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, + but will be removed in a future release + + Example:: + + comp_oper = one_of("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) + + prints:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + asKeyword = asKeyword or as_keyword + useRegex = useRegex and use_regex + + if ( + isinstance(caseless, str_type) + and __diag__.warn_on_multiple_string_args_to_oneof + ): + warnings.warn( + "More than one string argument passed to one_of, pass" + " choices as a list or space-delimited string", + stacklevel=2, + ) + + if caseless: + isequal = lambda a, b: a.upper() == b.upper() + masks = lambda a, b: b.upper().startswith(a.upper()) + parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral + else: + isequal = lambda a, b: a == b + masks = lambda a, b: b.startswith(a) + parseElementClass = Keyword if asKeyword else Literal + + symbols = [] + if isinstance(strs, str_type): + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + raise TypeError("Invalid argument to one_of, expected string or iterable") + if not symbols: + return NoMatch() + + # reorder given symbols to take care to avoid masking longer choices with shorter ones + # (but only if the given symbols are not just single characters) + if any(len(sym) > 1 for sym in symbols): + i = 0 + while i < len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1 :]): + if isequal(other, cur): + del symbols[i + j + 1] + break + elif masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 + + if useRegex: + re_flags: int = re.IGNORECASE if caseless else 0 + + try: + if all(len(sym) == 1 for sym in symbols): + # symbols are just single characters, create range regex pattern + patt = "[{}]".format( + "".join(_escape_regex_range_chars(sym) for sym in symbols) + ) + else: + patt = "|".join(re.escape(sym) for sym in symbols) + + # wrap with \b word break markers if defining as keywords + if asKeyword: + patt = r"\b(?:{})\b".format(patt) + + ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) + + if caseless: + # add parse action to return symbols as specified, not in random + # casing as found in input string + symbol_map = {sym.lower(): sym for sym in symbols} + ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) + + return ret + + except sre_constants.error: + warnings.warn( + "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 + ) + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( + " | ".join(symbols) + ) + + +def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: + """Helper to easily and clearly define a dictionary by specifying 
+ the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. + + Example:: + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + print(OneOrMore(attr_expr).parse_string(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) + + # similar to Dict, but simpler call format + result = dict_of(attr_label, attr_value).parse_string(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.as_dict()) + + prints:: + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} + """ + return Dict(OneOrMore(Group(key + value))) + + +def original_text_for( + expr: ParserElement, as_string: bool = True, *, asString: bool = True +) -> ParserElement: + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns astring containing the original parsed text. + + If the optional ``as_string`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`original_text_for` contains expressions with defined + results names, you must set ``as_string`` to ``False`` if you + want to preserve those results name values. + + The ``asString`` pre-PEP8 argument is retained for compatibility, + but will be removed in a future release. + + Example:: + + src = "this is test bold text normal text " + for tag in ("b", "i"): + opener, closer = make_html_tags(tag) + patt = original_text_for(opener + SkipTo(closer) + closer) + print(patt.search_string(src)[0]) + + prints:: + + [' bold text '] + ['text'] + """ + asString = asString and as_string + + locMarker = Empty().set_parse_action(lambda s, loc, t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s, l, t: s[t._original_start : t._original_end] + else: + + def extractText(s, l, t): + t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] + + matchExpr.set_parse_action(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) + return matchExpr + + +def ungroup(expr: ParserElement) -> ParserElement: + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. 
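+
+    A minimal sketch of the effect::
+
+        Group(Word(alphas)).parse_string("abc")           # -> [['abc']]
+        ungroup(Group(Word(alphas))).parse_string("abc")  # -> ['abc']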
+ """ + return TokenConverter(expr).add_parse_action(lambda t: t[0]) + + +def locatedExpr(expr: ParserElement) -> ParserElement: + """ + (DEPRECATED - future code should use the Located class) + Helper to decorate a returned token with its starting and ending + locations in the input string. + + This helper adds the following results names: + + - ``locn_start`` - location where matched expression begins + - ``locn_end`` - location where matched expression ends + - ``value`` - the actual parsed results + + Be careful if the input text contains ```` characters, you + may want to call :class:`ParserElement.parseWithTabs` + + Example:: + + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().set_parse_action(lambda ss, ll, tt: ll) + return Group( + locator("locn_start") + + expr("value") + + locator.copy().leaveWhitespace()("locn_end") + ) + + +def nested_expr( + opener: Union[str, ParserElement] = "(", + closer: Union[str, ParserElement] = ")", + content: OptionalType[ParserElement] = None, + ignore_expr: ParserElement = quoted_string(), + *, + ignoreExpr: ParserElement = quoted_string(), +) -> ParserElement: + """Helper method for defining nested lists enclosed in opening and + closing delimiters (``"("`` and ``")"`` are the default). + + Parameters: + - ``opener`` - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + - ``closer`` - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + - ``content`` - expression for items within the nested lists + (default= ``None``) + - ``ignore_expr`` - expression for ignoring opening and closing delimiters + (default= :class:`quoted_string`) + - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility + but will be removed in a future release + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. + + Use the ``ignore_expr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quoted_string or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`. The default is + :class:`quoted_string`, but if no expressions are to be ignored, then + pass ``None`` for this argument. 
+ + Example:: + + data_type = one_of("void int short long char float double") + decl_data_type = Combine(data_type + Opt(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR, RPAR = map(Suppress, "()") + + code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Opt(delimited_list(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(c_style_comment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.search_string(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + + prints:: + + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if ignoreExpr != ignore_expr: + ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener, str_type) and isinstance(closer, str_type): + if len(opener) == 1 and len(closer) == 1: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS, + exact=1, + ) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = empty.copy() + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS + ).set_parse_action(lambda t: t[0].strip()) + else: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = Combine( + OneOrMore( + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + raise ValueError( + "opening and closing arguments must be strings if no content expression is given" + ) + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( + Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) + ) + else: + ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.set_name("nested %s%s expression" % (opener, closer)) + return ret + + +def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr, str_type): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Opt("/", default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + else: + tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( + printables, exclude_chars=">" + ) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict( + ZeroOrMore( + Group( + tagAttrName.set_parse_action(lambda t: t[0].lower()) + + Opt(Suppress("=") + tagAttrValue) + ) + ) + ) + + Opt("/", 
default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + closeTag = Combine(Literal("", adjacent=False) + + openTag.set_name("<%s>" % resname) + # add start results name in parse action now that ungrouped names are not reported at two levels + openTag.add_parse_action( + lambda t: t.__setitem__( + "start" + "".join(resname.replace(":", " ").title().split()), t.copy() + ) + ) + closeTag = closeTag( + "end" + "".join(resname.replace(":", " ").title().split()) + ).set_name("" % resname) + openTag.tag = resname + closeTag.tag = resname + openTag.tag_body = SkipTo(closeTag()) + return openTag, closeTag + + +def make_html_tags( + tag_str: Union[str, ParserElement] +) -> Tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for HTML, + given a tag name. Matches tags in either upper or lower case, + attributes with namespaces and with quoted or unquoted values. + + Example:: + + text = 'More info at the pyparsing wiki page' + # make_html_tags returns pyparsing expressions for the opening and + # closing tags as a 2-tuple + a, a_end = make_html_tags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.search_string(text): + # attributes in the tag (like "href" shown here) are + # also accessible as named results + print(link.link_text, '->', link.href) + + prints:: + + pyparsing -> https://github.com/pyparsing/pyparsing/wiki + """ + return _makeTags(tag_str, False) + + +def make_xml_tags( + tag_str: Union[str, ParserElement] +) -> Tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for XML, + given a tag name. Matches tags only in the given upper/lower case. + + Example: similar to :class:`make_html_tags` + """ + return _makeTags(tag_str, True) + + +any_open_tag, any_close_tag = make_html_tags( + Word(alphas, alphanums + "_:").set_name("any tag") +) + +_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} +common_html_entity = Regex("&(?P" + "|".join(_htmlEntityMap) + ");").set_name( + "common HTML entity" +) + + +def replace_html_entity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + + +class OpAssoc(Enum): + LEFT = 1 + RIGHT = 2 + + +InfixNotationOperatorArgType = Union[ + ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] +] +InfixNotationOperatorSpec = Union[ + Tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + OptionalType[ParseAction], + ], + Tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + ], +] + + +def infix_notation( + base_expr: ParserElement, + op_list: List[InfixNotationOperatorSpec], + lpar: Union[str, ParserElement] = Suppress("("), + rpar: Union[str, ParserElement] = Suppress(")"), +) -> ParserElement: + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions. The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infix_notation. See + :class:`ParserElement.enable_packrat` for a mechanism to potentially + improve your parser performance. 
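+
+    As an illustrative sketch, caching can be enabled once, before the
+    expression is built::
+
+        ParserElement.enable_packrat()
+        # ... then construct the infix_notation expression as usual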
+ + Parameters: + - ``base_expr`` - expression representing the most basic operand to + be used in the expression + - ``op_list`` - list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(op_expr, + num_operands, right_left_assoc, (optional)parse_action)``, where: + + - ``op_expr`` is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if ``num_operands`` + is 3, ``op_expr`` is a tuple of two expressions, for the two + operators separating the 3 terms + - ``num_operands`` is the number of terms for this operator (must be 1, + 2, or 3) + - ``right_left_assoc`` is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. + - ``parse_action`` is the parse action to be associated with + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``set_parse_action(*fn)`` + (:class:`ParserElement.set_parse_action`) + - ``lpar`` - expression for matching left-parentheses + (default= ``Suppress('(')``) + - ``rpar`` - expression for matching right-parentheses + (default= ``Suppress(')')``) + + Example:: + + # simple example of four-function arithmetic with ints and + # variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infix_notation(integer | varname, + [ + ('-', 1, OpAssoc.RIGHT), + (one_of('* /'), 2, OpAssoc.LEFT), + (one_of('+ -'), 2, OpAssoc.LEFT), + ]) + + arith_expr.run_tests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', full_dump=False) + + prints:: + + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.try_parse(instring, loc) + return loc, [] + + _FB.__name__ = "FollowedBy>" + + ret = Forward() + lpar = Suppress(lpar) + rpar = Suppress(rpar) + lastExpr = base_expr | (lpar + ret + rpar) + for i, operDef in enumerate(op_list): + opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] + if isinstance(opExpr, str_type): + opExpr = ParserElement._literalStringClass(opExpr) + if arity == 3: + if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions" + ) + opExpr1, opExpr2 = opExpr + term_name = "{}{} term".format(opExpr1, opExpr2) + else: + term_name = "{} term".format(opExpr) + + if not 1 <= arity <= 3: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + + if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): + raise ValueError("operator must indicate right or left associativity") + + thisExpr = Forward().set_name(term_name) + if rightLeftAssoc is OpAssoc.LEFT: + if arity == 1: + matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( + lastExpr + (opExpr + lastExpr)[1, ...] 
+ ) + else: + matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr + ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) + elif rightLeftAssoc is OpAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Opt): + opExpr = Opt(opExpr) + matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( + lastExpr + (opExpr + thisExpr)[1, ...] + ) + else: + matchExpr = _FB(lastExpr + thisExpr) + Group( + lastExpr + thisExpr[1, ...] + ) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr + ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.set_parse_action(*pa) + else: + matchExpr.set_parse_action(pa) + thisExpr <<= (matchExpr | lastExpr).setName(term_name) + lastExpr = thisExpr + ret <<= lastExpr + return ret + + +def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): + """ + (DEPRECATED - use IndentedBlock class instead) + Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. + + Parameters: + + - ``blockStatementExpr`` - expression defining syntax of statement that + is repeated within the indented block + - ``indentStack`` - list created by caller to manage indentation stack + (multiple ``statementWithIndentedBlock`` expressions within a single + grammar should share a common ``indentStack``) + - ``indent`` - boolean indicating whether block must be indented beyond + the current level; set to ``False`` for block of left-most statements + (default= ``True``) + + A valid block must contain at least one ``blockStatement``. + + (Note that indentedBlock uses internal parse actions which make it + incompatible with packrat parsing.) 
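+
+    As a minimal sketch, the replacement :class:`IndentedBlock` takes the
+    repeated statement expression directly and manages the indentation
+    stack itself (using the ``stmt`` name from the example below)::
+
+        func_body = IndentedBlock(stmt)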
+ + Example:: + + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group(funcDecl + func_body) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << (funcDef | assignment | identifier) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + + prints:: + + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + backup_stacks.append(indentStack[:]) + + def reset_stack(): + indentStack[:] = backup_stacks[-1] + + def checkPeerIndent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseException(s, l, "illegal nesting") + raise ParseException(s, l, "not a peer entry") + + def checkSubIndent(s, l, t): + curCol = col(l, s) + if curCol > indentStack[-1]: + indentStack.append(curCol) + else: + raise ParseException(s, l, "not a subentry") + + def checkUnindent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if not (indentStack and curCol in indentStack): + raise ParseException(s, l, "not an unindent") + if curCol < indentStack[-1]: + indentStack.pop() + + NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) + INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") + PEER = Empty().set_parse_action(checkPeerIndent).set_name("") + UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") + if indent: + smExpr = Group( + Opt(NL) + + INDENT + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + UNDENT + ) + else: + smExpr = Group( + Opt(NL) + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + Opt(UNDENT) + ) + + # add a parse action to remove backup_stack from list of backups + smExpr.add_parse_action( + lambda: backup_stacks.pop(-1) and None if backup_stacks else None + ) + smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.set_name("indented block") + + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name( + "C style comment" +) +"Comment of the form ``/* ... */``" + +html_comment = Regex(r"").set_name("HTML comment") +"Comment of the form ````" + +rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") +dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") +"Comment of the form ``// ... 
(to end of line)``" + +cpp_style_comment = Combine( + Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment +).set_name("C++ style comment") +"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" + +java_style_comment = cpp_style_comment +"Same as :class:`cpp_style_comment`" + +python_style_comment = Regex(r"#.*").set_name("Python style comment") +"Comment of the form ``# ... (to end of line)``" + + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] + + +# pre-PEP8 compatible names +delimitedList = delimited_list +countedArray = counted_array +matchPreviousLiteral = match_previous_literal +matchPreviousExpr = match_previous_expr +oneOf = one_of +dictOf = dict_of +originalTextFor = original_text_for +nestedExpr = nested_expr +makeHTMLTags = make_html_tags +makeXMLTags = make_xml_tags +anyOpenTag, anyCloseTag = any_open_tag, any_close_tag +commonHTMLEntity = common_html_entity +replaceHTMLEntity = replace_html_entity +opAssoc = OpAssoc +infixNotation = infix_notation +cStyleComment = c_style_comment +htmlComment = html_comment +restOfLine = rest_of_line +dblSlashComment = dbl_slash_comment +cppStyleComment = cpp_style_comment +javaStyleComment = java_style_comment +pythonStyleComment = python_style_comment diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py new file mode 100644 index 000000000..842d16b3c --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py @@ -0,0 +1,758 @@ +# results.py +from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator +import pprint +from weakref import ref as wkref +from typing import Tuple, Any + +str_type: Tuple[type, ...] = (str, bytes) +_generator_type = type((_ for _ in ())) + + +class _ParseResultsWithOffset: + __slots__ = ["tup"] + + def __init__(self, p1, p2): + self.tup = (p1, p2) + + def __getitem__(self, i): + return self.tup[i] + + def __getstate__(self): + return self.tup + + def __setstate__(self, *args): + self.tup = args[0] + + +class ParseResults: + """Structured parse results, to provide multiple means of access to + the parsed data: + + - as a list (``len(results)``) + - by list index (``results[0], results[1]``, etc.) + - by attribute (``results.`` - see :class:`ParserElement.set_results_name`) + + Example:: + + integer = Word(nums) + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + # equivalent form: + # date_str = (integer("year") + '/' + # + integer("month") + '/' + # + integer("day")) + + # parse_string returns a ParseResults object + result = date_str.parse_string("1999/12/31") + + def test(s, fn=repr): + print("{} -> {}".format(s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + + prints:: + + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: 31 + - month: 12 + - year: 1999 + """ + + _null_values: Tuple[Any, ...] 
= (None, [], "", ()) + + __slots__ = [ + "_name", + "_parent", + "_all_names", + "_modal", + "_toklist", + "_tokdict", + "__weakref__", + ] + + class List(list): + """ + Simple wrapper class to distinguish parsed list results that should be preserved + as actual Python lists, instead of being converted to :class:`ParseResults`: + + LBRACK, RBRACK = map(pp.Suppress, "[]") + element = pp.Forward() + item = ppc.integer + element_list = LBRACK + pp.delimited_list(element) + RBRACK + + # add parse actions to convert from ParseResults to actual Python collection types + def as_python_list(t): + return pp.ParseResults.List(t.as_list()) + element_list.add_parse_action(as_python_list) + + element <<= item | element_list + + element.run_tests(''' + 100 + [2,3,4] + [[2, 1],3,4] + [(2, 1),3,4] + (2,3,4) + ''', post_parse=lambda s, r: (r[0], type(r[0]))) + + prints: + + 100 + (100, ) + + [2,3,4] + ([2, 3, 4], ) + + [[2, 1],3,4] + ([[2, 1], 3, 4], ) + + (Used internally by :class:`Group` when `aslist=True`.) + """ + + def __new__(cls, contained=None): + if contained is None: + contained = [] + + if not isinstance(contained, list): + raise TypeError( + "{} may only be constructed with a list," + " not {}".format(cls.__name__, type(contained).__name__) + ) + + return list.__new__(cls) + + def __new__(cls, toklist=None, name=None, **kwargs): + if isinstance(toklist, ParseResults): + return toklist + self = object.__new__(cls) + self._name = None + self._parent = None + self._all_names = set() + + if toklist is None: + self._toklist = [] + elif isinstance(toklist, (list, _generator_type)): + self._toklist = ( + [toklist[:]] + if isinstance(toklist, ParseResults.List) + else list(toklist) + ) + else: + self._toklist = [toklist] + self._tokdict = dict() + return self + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance + ): + self._modal = modal + if name is not None and name != "": + if isinstance(name, int): + name = str(name) + if not modal: + self._all_names = {name} + self._name = name + if toklist not in self._null_values: + if isinstance(toklist, (str_type, type)): + toklist = [toklist] + if asList: + if isinstance(toklist, ParseResults): + self[name] = _ParseResultsWithOffset( + ParseResults(toklist._toklist), 0 + ) + else: + self[name] = _ParseResultsWithOffset( + ParseResults(toklist[0]), 0 + ) + self[name]._name = name + else: + try: + self[name] = toklist[0] + except (KeyError, TypeError, IndexError): + if toklist is not self: + self[name] = toklist + else: + self._name = name + + def __getitem__(self, i): + if isinstance(i, (int, slice)): + return self._toklist[i] + else: + if i not in self._all_names: + return self._tokdict[i][-1][0] + else: + return ParseResults([v[0] for v in self._tokdict[i]]) + + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self._tokdict[k] = self._tokdict.get(k, list()) + [v] + sub = v[0] + elif isinstance(k, (int, slice)): + self._toklist[k] = v + sub = v + else: + self._tokdict[k] = self._tokdict.get(k, list()) + [ + _ParseResultsWithOffset(v, 0) + ] + sub = v + if isinstance(sub, ParseResults): + sub._parent = wkref(self) + + def __delitem__(self, i): + if isinstance(i, (int, slice)): + mylen = len(self._toklist) + del self._toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i + 1) + # get removed indices + 
removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for name, occurrences in self._tokdict.items(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position - (position > j) + ) + else: + del self._tokdict[i] + + def __contains__(self, k) -> bool: + return k in self._tokdict + + def __len__(self) -> int: + return len(self._toklist) + + def __bool__(self) -> bool: + return not not (self._toklist or self._tokdict) + + def __iter__(self) -> Iterator: + return iter(self._toklist) + + def __reversed__(self) -> Iterator: + return iter(self._toklist[::-1]) + + def keys(self): + return iter(self._tokdict) + + def values(self): + return (self[k] for k in self.keys()) + + def items(self): + return ((k, self[k]) for k in self.keys()) + + def haskeys(self) -> bool: + """ + Since ``keys()`` returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return bool(self._tokdict) + + def pop(self, *args, **kwargs): + """ + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + def remove_first(tokens): + tokens.pop(0) + numlist.add_parse_action(remove_first) + print(numlist.parse_string("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parse_string("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.add_parse_action(remove_LABEL) + print(patt.parse_string("AAB 123 321").dump()) + + prints:: + + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k, v in kwargs.items(): + if k == "default": + args = (args[0], v) + else: + raise TypeError( + "pop() got an unexpected keyword argument {!r}".format(k) + ) + if isinstance(args[0], int) or len(args) == 1 or args[0] in self: + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, default_value=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given ``default_value`` or ``None`` if no + ``default_value`` is specified. + + Similar to ``dict.get()``. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return default_value + + def insert(self, index, ins_string): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to ``list.insert()``. 
+ + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + numlist.add_parse_action(insert_locn) + print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] + """ + self._toklist.insert(index, ins_string) + # fixup indices in token dictionary + for name, occurrences in self._tokdict.items(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position + (position > index) + ) + + def append(self, item): + """ + Add single element to end of ``ParseResults`` list of elements. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + numlist.add_parse_action(append_sum) + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] + """ + self._toklist.append(item) + + def extend(self, itemseq): + """ + Add sequence of elements to end of ``ParseResults`` list of elements. + + Example:: + + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + patt.add_parse_action(make_palindrome) + print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self.__iadd__(itemseq) + else: + self._toklist.extend(itemseq) + + def clear(self): + """ + Clear all elements and results names. + """ + del self._toklist[:] + self._tokdict.clear() + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + if name.startswith("__"): + raise AttributeError(name) + return "" + + def __add__(self, other) -> "ParseResults": + ret = self.copy() + ret += other + return ret + + def __iadd__(self, other) -> "ParseResults": + if other._tokdict: + offset = len(self._toklist) + addoffset = lambda a: offset if a < 0 else a + offset + otheritems = other._tokdict.items() + otherdictitems = [ + (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems + for v in vlist + ] + for k, v in otherdictitems: + self[k] = v + if isinstance(v[0], ParseResults): + v[0]._parent = wkref(self) + + self._toklist += other._toklist + self._all_names |= other._all_names + return self + + def __radd__(self, other) -> "ParseResults": + if isinstance(other, int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__(self) -> str: + return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict()) + + def __str__(self) -> str: + return ( + "[" + + ", ".join( + str(i) if isinstance(i, ParseResults) else repr(i) + for i in self._toklist + ) + + "]" + ) + + def _asStringList(self, sep=""): + out = [] + for item in self._toklist: + if out and sep: + out.append(sep) + if isinstance(item, ParseResults): + out += item._asStringList() + else: + out.append(str(item)) + return out + + def as_list(self) -> list: + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. 
+ + Example:: + + patt = OneOrMore(Word(alphas)) + result = patt.parse_string("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] + + # Use as_list() to create an actual list + result_list = result.as_list() + print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [ + res.as_list() if isinstance(res, ParseResults) else res + for res in self._toklist + ] + + def as_dict(self) -> dict: + """ + Returns the named parse results as a nested dictionary. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('12/31/1999') + print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.as_dict() + print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + + def to_item(obj): + if isinstance(obj, ParseResults): + return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] + else: + return obj + + return dict((k, to_item(v)) for k, v in self.items()) + + def copy(self) -> "ParseResults": + """ + Returns a new copy of a :class:`ParseResults` object. + """ + ret = ParseResults(self._toklist) + ret._tokdict = self._tokdict.copy() + ret._parent = self._parent + ret._all_names |= self._all_names + ret._name = self._name + return ret + + def get_name(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. + + Example:: + + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parse_string("22 111-22-3333 #221B") + for item in result: + print(item.get_name(), ':', item[0]) + + prints:: + + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self._name: + return self._name + elif self._parent: + par = self._parent() + + def find_in_parent(sub): + return next( + ( + k + for k, vlist in par._tokdict.items() + for v, loc in vlist + if sub is v + ), + None, + ) + + return find_in_parent(self) if par else None + elif ( + len(self) == 1 + and len(self._tokdict) == 1 + and next(iter(self._tokdict.values()))[0][1] in (0, -1) + ): + return next(iter(self._tokdict.keys())) + else: + return None + + def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: + """ + Diagnostic method for listing out the contents of + a :class:`ParseResults`. Accepts an optional ``indent`` argument so + that this string can be embedded in a nested display of other data. 
+ + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('12/31/1999') + print(result.dump()) + + prints:: + + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ + out = [] + NL = "\n" + out.append(indent + str(self.as_list()) if include_list else "") + + if full: + if self.haskeys(): + items = sorted((str(k), v) for k, v in self.items()) + for k, v in items: + if out: + out.append(NL) + out.append("{}{}- {}: ".format(indent, (" " * _depth), k)) + if isinstance(v, ParseResults): + if v: + out.append( + v.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ) + ) + else: + out.append(str(v)) + else: + out.append(repr(v)) + if any(isinstance(vv, ParseResults) for vv in self): + v = self + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + out.append( + "\n{}{}[{}]:\n{}{}{}".format( + indent, + (" " * (_depth)), + i, + indent, + (" " * (_depth + 1)), + vv.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ), + ) + ) + else: + out.append( + "\n%s%s[%d]:\n%s%s%s" + % ( + indent, + (" " * (_depth)), + i, + indent, + (" " * (_depth + 1)), + str(vv), + ) + ) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the + `pprint `_ module. + Accepts additional positional or keyword args as defined for + `pprint.pprint `_ . + + Example:: + + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimited_list(term))) + result = func.parse_string("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + + prints:: + + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.as_list(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( + self._toklist, + ( + self._tokdict.copy(), + self._parent is not None and self._parent() or None, + self._all_names, + self._name, + ), + ) + + def __setstate__(self, state): + self._toklist, (self._tokdict, par, inAccumNames, self._name) = state + self._all_names = set(inAccumNames) + if par is not None: + self._parent = wkref(par) + else: + self._parent = None + + def __getnewargs__(self): + return self._toklist, self._name + + def __dir__(self): + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None) -> "ParseResults": + """ + Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the + name-value relations as results names. If an optional ``name`` argument is + given, a nested ``ParseResults`` will be returned. 
+ """ + + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + else: + return not isinstance(obj, str_type) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, asList=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret + + asList = as_list + asDict = as_dict + getName = get_name + + +MutableMapping.register(ParseResults) +MutableSequence.register(ParseResults) diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py new file mode 100644 index 000000000..991972f3f --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py @@ -0,0 +1,331 @@ +# testing.py + +from contextlib import contextmanager +from typing import Optional + +from .core import ( + ParserElement, + ParseException, + Keyword, + __diag__, + __compat__, +) + + +class pyparsing_test: + """ + namespace class for classes useful in writing unit tests + """ + + class reset_pyparsing_context: + """ + Context manager to be used when writing unit tests that modify pyparsing config values: + - packrat parsing + - bounded recursion parsing + - default whitespace characters. + - default keyword characters + - literal string auto-conversion class + - __diag__ settings + + Example:: + + with reset_pyparsing_context(): + # test that literals used to construct a grammar are automatically suppressed + ParserElement.inlineLiteralsUsing(Suppress) + + term = Word(alphas) | Word(nums) + group = Group('(' + term[...] + ')') + + # assert that the '()' characters are not included in the parsed tokens + self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) + + # after exiting context manager, literals are converted to Literal expressions again + """ + + def __init__(self): + self._save_context = {} + + def save(self): + self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS + self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS + + self._save_context[ + "literal_string_class" + ] = ParserElement._literalStringClass + + self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace + + self._save_context["packrat_enabled"] = ParserElement._packratEnabled + if ParserElement._packratEnabled: + self._save_context[ + "packrat_cache_size" + ] = ParserElement.packrat_cache.size + else: + self._save_context["packrat_cache_size"] = None + self._save_context["packrat_parse"] = ParserElement._parse + self._save_context[ + "recursion_enabled" + ] = ParserElement._left_recursion_enabled + + self._save_context["__diag__"] = { + name: getattr(__diag__, name) for name in __diag__._all_names + } + + self._save_context["__compat__"] = { + "collect_all_And_tokens": __compat__.collect_all_And_tokens + } + + return self + + def restore(self): + # reset pyparsing global state + if ( + ParserElement.DEFAULT_WHITE_CHARS + != self._save_context["default_whitespace"] + ): + ParserElement.set_default_whitespace_chars( + self._save_context["default_whitespace"] + ) + + ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] + + Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] + ParserElement.inlineLiteralsUsing( + self._save_context["literal_string_class"] + ) + + for name, value in self._save_context["__diag__"].items(): + (__diag__.enable if value else __diag__.disable)(name) + + ParserElement._packratEnabled = False + if 
self._save_context["packrat_enabled"]: + ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) + else: + ParserElement._parse = self._save_context["packrat_parse"] + ParserElement._left_recursion_enabled = self._save_context[ + "recursion_enabled" + ] + + __compat__.collect_all_And_tokens = self._save_context["__compat__"] + + return self + + def copy(self): + ret = type(self)() + ret._save_context.update(self._save_context) + return ret + + def __enter__(self): + return self.save() + + def __exit__(self, *args): + self.restore() + + class TestParseResultsAsserts: + """ + A mixin class to add parse results assertion methods to normal unittest.TestCase classes. + """ + + def assertParseResultsEquals( + self, result, expected_list=None, expected_dict=None, msg=None + ): + """ + Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, + and compare any defined results names with an optional ``expected_dict``. + """ + if expected_list is not None: + self.assertEqual(expected_list, result.as_list(), msg=msg) + if expected_dict is not None: + self.assertEqual(expected_dict, result.as_dict(), msg=msg) + + def assertParseAndCheckList( + self, expr, test_string, expected_list, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. + """ + result = expr.parse_string(test_string, parse_all=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) + + def assertParseAndCheckDict( + self, expr, test_string, expected_dict, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. + """ + result = expr.parse_string(test_string, parseAll=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) + + def assertRunTestResults( + self, run_tests_report, expected_parse_results=None, msg=None + ): + """ + Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of + list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped + with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. + Finally, asserts that the overall ``runTests()`` success value is ``True``. 
+ + :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests + :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] + """ + run_test_success, run_test_results = run_tests_report + + if expected_parse_results is not None: + merged = [ + (*rpt, expected) + for rpt, expected in zip(run_test_results, expected_parse_results) + ] + for test_string, result, expected in merged: + # expected should be a tuple containing a list and/or a dict or an exception, + # and optional failure message string + # an empty tuple will skip any result validation + fail_msg = next( + (exp for exp in expected if isinstance(exp, str)), None + ) + expected_exception = next( + ( + exp + for exp in expected + if isinstance(exp, type) and issubclass(exp, Exception) + ), + None, + ) + if expected_exception is not None: + with self.assertRaises( + expected_exception=expected_exception, msg=fail_msg or msg + ): + if isinstance(result, Exception): + raise result + else: + expected_list = next( + (exp for exp in expected if isinstance(exp, list)), None + ) + expected_dict = next( + (exp for exp in expected if isinstance(exp, dict)), None + ) + if (expected_list, expected_dict) != (None, None): + self.assertParseResultsEquals( + result, + expected_list=expected_list, + expected_dict=expected_dict, + msg=fail_msg or msg, + ) + else: + # warning here maybe? + print("no validation for {!r}".format(test_string)) + + # do this last, in case some specific test results can be reported instead + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + + @contextmanager + def assertRaisesParseException(self, exc_type=ParseException, msg=None): + with self.assertRaises(exc_type, msg=msg): + yield + + @staticmethod + def with_line_numbers( + s: str, + start_line: Optional[int] = None, + end_line: Optional[int] = None, + expand_tabs: bool = True, + eol_mark: str = "|", + mark_spaces: Optional[str] = None, + mark_control: Optional[str] = None, + ) -> str: + """ + Helpful method for debugging a parser - prints a string with line and column numbers. + (Line and column numbers are 1-based.) 
+ + :param s: tuple(bool, str - string to be printed with line and column numbers + :param start_line: int - (optional) starting line number in s to print (default=1) + :param end_line: int - (optional) ending line number in s to print (default=len(s)) + :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default + :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") + :param mark_spaces: str - (optional) special character to display in place of spaces + :param mark_control: str - (optional) convert non-printing control characters to a placeholding + character; valid values: + - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" + - any single character string - replace control characters with given string + - None (default) - string is displayed as-is + + :return: str - input string with leading line numbers and column number headers + """ + if expand_tabs: + s = s.expandtabs() + if mark_control is not None: + if mark_control == "unicode": + tbl = str.maketrans( + {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} + | {127: 0x2421} + ) + eol_mark = "" + else: + tbl = str.maketrans( + {c: mark_control for c in list(range(0, 32)) + [127]} + ) + s = s.translate(tbl) + if mark_spaces is not None and mark_spaces != " ": + if mark_spaces == "unicode": + tbl = str.maketrans({9: 0x2409, 32: 0x2423}) + s = s.translate(tbl) + else: + s = s.replace(" ", mark_spaces) + if start_line is None: + start_line = 1 + if end_line is None: + end_line = len(s) + end_line = min(end_line, len(s)) + start_line = min(max(1, start_line), end_line) + + if mark_control != "unicode": + s_lines = s.splitlines()[start_line - 1 : end_line] + else: + s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] + if not s_lines: + return "" + + lineno_width = len(str(end_line)) + max_line_len = max(len(line) for line in s_lines) + lead = " " * (lineno_width + 1) + if max_line_len >= 99: + header0 = ( + lead + + "".join( + "{}{}".format(" " * 99, (i + 1) % 100) + for i in range(max(max_line_len // 100, 1)) + ) + + "\n" + ) + else: + header0 = "" + header1 = ( + header0 + + lead + + "".join( + " {}".format((i + 1) % 10) + for i in range(-(-max_line_len // 10)) + ) + + "\n" + ) + header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" + return ( + header1 + + header2 + + "\n".join( + "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) + for i, line in enumerate(s_lines, start=start_line) + ) + + "\n" + ) diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py new file mode 100644 index 000000000..caa3306db --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py @@ -0,0 +1,332 @@ +# unicode.py + +import sys +from itertools import filterfalse +from typing import List, Tuple, Union + + +class _lazyclassproperty: + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls is None: + cls = type(obj) + if not hasattr(cls, "_intern") or any( + cls._intern is getattr(superclass, "_intern", []) + for superclass in cls.__mro__[1:] + ): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] + + +class unicode_set: + """ + A set of Unicode characters, for 
language-specific strings for + ``alphas``, ``nums``, ``alphanums``, and ``printables``. + A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``. Ranges can be specified using + 2-tuples or a 1-tuple, such as:: + + _ranges = [ + (0x0020, 0x007e), + (0x00a0, 0x00ff), + (0x0100,), + ] + + Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). + + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + + _ranges: UnicodeRangeList = [] + + @_lazyclassproperty + def _chars_for_ranges(cls): + ret = [] + for cc in cls.__mro__: + if cc is unicode_set: + break + for rr in getattr(cc, "_ranges", ()): + ret.extend(range(rr[0], rr[-1] + 1)) + return [chr(c) for c in sorted(set(ret))] + + @_lazyclassproperty + def printables(cls): + "all non-whitespace characters in this range" + return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphas(cls): + "all alphabetic characters in this range" + return "".join(filter(str.isalpha, cls._chars_for_ranges)) + + @_lazyclassproperty + def nums(cls): + "all numeric digit characters in this range" + return "".join(filter(str.isdigit, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphanums(cls): + "all alphanumeric characters in this range" + return cls.alphas + cls.nums + + @_lazyclassproperty + def identchars(cls): + "all characters in this range that are valid identifier characters, plus underscore '_'" + return "".join( + sorted( + set( + "".join(filter(str.isidentifier, cls._chars_for_ranges)) + + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" + + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" + + "_" + ) + ) + ) + + @_lazyclassproperty + def identbodychars(cls): + """ + all characters in this range that are valid identifier body characters, + plus the digits 0-9 + """ + return "".join( + sorted( + set( + cls.identchars + + "0123456789" + + "".join( + c for c in cls._chars_for_ranges if ("_" + c).isidentifier() + ) + ) + ) + ) + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. 
+ """ + + _ranges: UnicodeRangeList = [(32, sys.maxunicode)] + + class Latin1(unicode_set): + "Unicode set for Latin-1 Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0020, 0x007E), + (0x00A0, 0x00FF), + ] + + class LatinA(unicode_set): + "Unicode set for Latin-A Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0100, 0x017F), + ] + + class LatinB(unicode_set): + "Unicode set for Latin-B Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0180, 0x024F), + ] + + class Greek(unicode_set): + "Unicode set for Greek Unicode Character Ranges" + _ranges: UnicodeRangeList = [ + (0x0342, 0x0345), + (0x0370, 0x0377), + (0x037A, 0x037F), + (0x0384, 0x038A), + (0x038C,), + (0x038E, 0x03A1), + (0x03A3, 0x03E1), + (0x03F0, 0x03FF), + (0x1D26, 0x1D2A), + (0x1D5E,), + (0x1D60,), + (0x1D66, 0x1D6A), + (0x1F00, 0x1F15), + (0x1F18, 0x1F1D), + (0x1F20, 0x1F45), + (0x1F48, 0x1F4D), + (0x1F50, 0x1F57), + (0x1F59,), + (0x1F5B,), + (0x1F5D,), + (0x1F5F, 0x1F7D), + (0x1F80, 0x1FB4), + (0x1FB6, 0x1FC4), + (0x1FC6, 0x1FD3), + (0x1FD6, 0x1FDB), + (0x1FDD, 0x1FEF), + (0x1FF2, 0x1FF4), + (0x1FF6, 0x1FFE), + (0x2129,), + (0x2719, 0x271A), + (0xAB65,), + (0x10140, 0x1018D), + (0x101A0,), + (0x1D200, 0x1D245), + (0x1F7A1, 0x1F7A7), + ] + + class Cyrillic(unicode_set): + "Unicode set for Cyrillic Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0400, 0x052F), + (0x1C80, 0x1C88), + (0x1D2B,), + (0x1D78,), + (0x2DE0, 0x2DFF), + (0xA640, 0xA672), + (0xA674, 0xA69F), + (0xFE2E, 0xFE2F), + ] + + class Chinese(unicode_set): + "Unicode set for Chinese Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x2E80, 0x2E99), + (0x2E9B, 0x2EF3), + (0x31C0, 0x31E3), + (0x3400, 0x4DB5), + (0x4E00, 0x9FEF), + (0xA700, 0xA707), + (0xF900, 0xFA6D), + (0xFA70, 0xFAD9), + (0x16FE2, 0x16FE3), + (0x1F210, 0x1F212), + (0x1F214, 0x1F23B), + (0x1F240, 0x1F248), + (0x20000, 0x2A6D6), + (0x2A700, 0x2B734), + (0x2B740, 0x2B81D), + (0x2B820, 0x2CEA1), + (0x2CEB0, 0x2EBE0), + (0x2F800, 0x2FA1D), + ] + + class Japanese(unicode_set): + "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" + _ranges: UnicodeRangeList = [] + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x4E00, 0x9FBF), + (0x3000, 0x303F), + ] + + class Hiragana(unicode_set): + "Unicode set for Hiragana Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x3041, 0x3096), + (0x3099, 0x30A0), + (0x30FC,), + (0xFF70,), + (0x1B001,), + (0x1B150, 0x1B152), + (0x1F200,), + ] + + class Katakana(unicode_set): + "Unicode set for Katakana Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x3099, 0x309C), + (0x30A0, 0x30FF), + (0x31F0, 0x31FF), + (0x32D0, 0x32FE), + (0xFF65, 0xFF9F), + (0x1B000,), + (0x1B164, 0x1B167), + (0x1F201, 0x1F202), + (0x1F213,), + ] + + class Hangul(unicode_set): + "Unicode set for Hangul (Korean) Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x1100, 0x11FF), + (0x302E, 0x302F), + (0x3131, 0x318E), + (0x3200, 0x321C), + (0x3260, 0x327B), + (0x327E,), + (0xA960, 0xA97C), + (0xAC00, 0xD7A3), + (0xD7B0, 0xD7C6), + (0xD7CB, 0xD7FB), + (0xFFA0, 0xFFBE), + (0xFFC2, 0xFFC7), + (0xFFCA, 0xFFCF), + (0xFFD2, 0xFFD7), + (0xFFDA, 0xFFDC), + ] + + Korean = Hangul + + class CJK(Chinese, Japanese, Hangul): + "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" + pass + + class Thai(unicode_set): + "Unicode set for Thai Unicode Character Range" + _ranges: 
UnicodeRangeList = [(0x0E01, 0x0E3A), (0x0E3F, 0x0E5B)] + + class Arabic(unicode_set): + "Unicode set for Arabic Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0600, 0x061B), + (0x061E, 0x06FF), + (0x0700, 0x077F), + ] + + class Hebrew(unicode_set): + "Unicode set for Hebrew Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0591, 0x05C7), + (0x05D0, 0x05EA), + (0x05EF, 0x05F4), + (0xFB1D, 0xFB36), + (0xFB38, 0xFB3C), + (0xFB3E,), + (0xFB40, 0xFB41), + (0xFB43, 0xFB44), + (0xFB46, 0xFB4F), + ] + + class Devanagari(unicode_set): + "Unicode set for Devanagari Unicode Character Range" + _ranges: UnicodeRangeList = [(0x0900, 0x097F), (0xA8E0, 0xA8FF)] + + +pyparsing_unicode.Japanese._ranges = ( + pyparsing_unicode.Japanese.Kanji._ranges + + pyparsing_unicode.Japanese.Hiragana._ranges + + pyparsing_unicode.Japanese.Katakana._ranges +) + +# define ranges in language character sets +pyparsing_unicode.العربية = pyparsing_unicode.Arabic +pyparsing_unicode.中文 = pyparsing_unicode.Chinese +pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic +pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek +pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew +pyparsing_unicode.日本語 = pyparsing_unicode.Japanese +pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji +pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana +pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana +pyparsing_unicode.한국어 = pyparsing_unicode.Korean +pyparsing_unicode.ไทย = pyparsing_unicode.Thai +pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py new file mode 100644 index 000000000..1309ad6ef --- /dev/null +++ b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py @@ -0,0 +1,234 @@ +# util.py +import warnings +import types +import collections +import itertools +from functools import lru_cache +from typing import List, Union, Iterable + +_bslash = chr(92) + + +class __config_flags: + """Internal class for defining compatibility and debugging flags""" + + _all_names: List[str] = [] + _fixed_names: List[str] = [] + _type_desc = "configuration" + + @classmethod + def _set(cls, dname, value): + if dname in cls._fixed_names: + warnings.warn( + "{}.{} {} is {} and cannot be overridden".format( + cls.__name__, + dname, + cls._type_desc, + str(getattr(cls, dname)).upper(), + ) + ) + return + if dname in cls._all_names: + setattr(cls, dname, value) + else: + raise ValueError("no such {} {!r}".format(cls._type_desc, dname)) + + enable = classmethod(lambda cls, name: cls._set(name, True)) + disable = classmethod(lambda cls, name: cls._set(name, False)) + + +@lru_cache(maxsize=128) +def col(loc: int, strg: str) -> int: + """ + Returns current column within a string, counting newlines as line separators. + The first column is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See + :class:`ParserElement.parseString` for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + """ + s = strg + return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) + + +@lru_cache(maxsize=128) +def lineno(loc: int, strg: str) -> int: + """Returns current line number within a string, counting newlines as line separators. 
+ The first line is number 1. + + Note - the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`ParserElement.parseString` + for more information on parsing strings containing ```` s, and + suggested methods to maintain a consistent view of the parsed string, the + parse location, and line and column positions within the parsed string. + """ + return strg.count("\n", 0, loc) + 1 + + +@lru_cache(maxsize=128) +def line(loc: int, strg: str) -> str: + """ + Returns the line of text containing loc within a string, counting newlines as line separators. + """ + last_cr = strg.rfind("\n", 0, loc) + next_cr = strg.find("\n", loc) + return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :] + + +class _UnboundedCache: + def __init__(self): + cache = {} + cache_get = cache.get + self.not_in_cache = not_in_cache = object() + + def get(_, key): + return cache_get(key, not_in_cache) + + def set_(_, key, value): + cache[key] = value + + def clear(_): + cache.clear() + + self.size = None + self.get = types.MethodType(get, self) + self.set = types.MethodType(set_, self) + self.clear = types.MethodType(clear, self) + + +class _FifoCache: + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + cache = collections.OrderedDict() + cache_get = cache.get + + def get(_, key): + return cache_get(key, not_in_cache) + + def set_(_, key, value): + cache[key] = value + while len(cache) > size: + cache.popitem(last=False) + + def clear(_): + cache.clear() + + self.size = size + self.get = types.MethodType(get, self) + self.set = types.MethodType(set_, self) + self.clear = types.MethodType(clear, self) + + +class LRUMemo: + """ + A memoizing mapping that retains `capacity` deleted items + + The memo tracks retained items by their access order; once `capacity` items + are retained, the least recently used item is discarded. 
+ """ + + def __init__(self, capacity): + self._capacity = capacity + self._active = {} + self._memory = collections.OrderedDict() + + def __getitem__(self, key): + try: + return self._active[key] + except KeyError: + self._memory.move_to_end(key) + return self._memory[key] + + def __setitem__(self, key, value): + self._memory.pop(key, None) + self._active[key] = value + + def __delitem__(self, key): + try: + value = self._active.pop(key) + except KeyError: + pass + else: + while len(self._memory) >= self._capacity: + self._memory.popitem(last=False) + self._memory[key] = value + + def clear(self): + self._active.clear() + self._memory.clear() + + +class UnboundedMemo(dict): + """ + A memoizing mapping that retains all deleted items + """ + + def __delitem__(self, key): + pass + + +def _escape_regex_range_chars(s: str) -> str: + # escape these chars: ^-[] + for c in r"\^-[]": + s = s.replace(c, _bslash + c) + s = s.replace("\n", r"\n") + s = s.replace("\t", r"\t") + return str(s) + + +def _collapse_string_to_ranges( + s: Union[str, Iterable[str]], re_escape: bool = True +) -> str: + def is_consecutive(c): + c_int = ord(c) + is_consecutive.prev, prev = c_int, is_consecutive.prev + if c_int - prev > 1: + is_consecutive.value = next(is_consecutive.counter) + return is_consecutive.value + + is_consecutive.prev = 0 + is_consecutive.counter = itertools.count() + is_consecutive.value = -1 + + def escape_re_range_char(c): + return "\\" + c if c in r"\^-][" else c + + def no_escape_re_range_char(c): + return c + + if not re_escape: + escape_re_range_char = no_escape_re_range_char + + ret = [] + s = "".join(sorted(set(s))) + if len(s) > 3: + for _, chars in itertools.groupby(s, key=is_consecutive): + first = last = next(chars) + last = collections.deque( + itertools.chain(iter([last]), chars), maxlen=1 + ).pop() + if first == last: + ret.append(escape_re_range_char(first)) + else: + ret.append( + "{}-{}".format( + escape_re_range_char(first), escape_re_range_char(last) + ) + ) + else: + ret = [escape_re_range_char(c) for c in s] + + return "".join(ret) + + +def _flatten(ll: list) -> list: + ret = [] + for i in ll: + if isinstance(i, list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE new file mode 100644 index 000000000..89de35479 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO
new file mode 100644
index 000000000..e917b921c
--- /dev/null
+++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO
@@ -0,0 +1,639 @@
+Metadata-Version: 2.1
+Name: setuptools-scm
+Version: 6.3.2
+Summary: the blessed package to manage your versions by scm tags
+Home-page: https://github.com/pypa/setuptools_scm/
+Author: Ronny Pfannschmidt
+Author-email: opensource@ronnypfannschmidt.de
+License: MIT
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Version Control
+Classifier: Topic :: System :: Software Distribution
+Classifier: Topic :: Utilities
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: packaging (>=20.0)
+Requires-Dist: setuptools
+Requires-Dist: tomli (>=1.0.0)
+Provides-Extra: toml
+Requires-Dist: setuptools (>=42) ; extra == 'toml'
+Requires-Dist: tomli (>=1.0.0) ; extra == 'toml'
+
+setuptools_scm
+==============
+
+``setuptools_scm`` handles managing your Python package versions
+in SCM metadata instead of declaring them as the version argument
+or in an SCM-managed file.
+
+Additionally ``setuptools_scm`` provides setuptools with a list of files that are managed by the SCM
+(i.e. it automatically adds all of the SCM-managed files to the sdist).
+Unwanted files must be excluded by discarding them via ``MANIFEST.in``.
+
+``setuptools_scm`` supports the following SCMs out of the box:
+
+* git
+* mercurial
+
+
+
+.. image:: https://github.com/pypa/setuptools_scm/workflows/python%20tests+artifacts+release/badge.svg
+    :target: https://github.com/pypa/setuptools_scm/actions
+
+.. image:: https://tidelift.com/badges/package/pypi/setuptools-scm
+    :target: https://tidelift.com/subscription/pkg/pypi-setuptools-scm?utm_source=pypi-setuptools-scm&utm_medium=readme
+
+
+``pyproject.toml`` usage
+------------------------
+
+The preferred way to configure ``setuptools_scm`` is to author
+settings in a ``tool.setuptools_scm`` section of ``pyproject.toml``.
+
+This feature requires Setuptools 42 or later, released in November 2019.
+If your project needs to support building from sdist on older versions
+of Setuptools, you will need to also implement the ``setup.py usage``
+for those legacy environments.
+
+First, ensure that ``setuptools_scm`` is present during the project's
+build step by specifying it as one of the build requirements.
+
+.. code:: toml
+
+    # pyproject.toml
+    [build-system]
+    requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2"]
+
+
+That will be sufficient to require ``setuptools_scm`` for projects
+that support PEP 518 (`pip <https://pypi.org/project/pip/>`_ and
+`pep517 <https://pypi.org/project/pep517/>`_). Many tools,
+especially those that invoke ``setup.py`` for any reason, may
+continue to rely on ``setup_requires``. For maximum compatibility
+with those uses, consider also including a ``setup_requires`` directive
+(described below in ``setup.py usage`` and ``setup.cfg``).
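+
+If you want to double-check that the build requirement is resolvable and that
+a version can be inferred from your checkout, a minimal sketch (equivalent to
+the ``setuptools_scm.__main__`` module bundled with this distribution) is:
+
+.. code:: python
+
+    from setuptools_scm import get_version
+
+    # prints the version guessed from the surrounding SCM checkout
+    print("Guessed Version", get_version())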
+
+To enable version inference, add this section to your pyproject.toml:
+
+.. code:: toml
+
+    # pyproject.toml
+    [tool.setuptools_scm]
+
+Including this section is comparable to supplying
+``use_scm_version=True`` in ``setup.py``. Additionally,
+include arbitrary keyword arguments in that section
+to be supplied to ``get_version()``. For example:
+
+.. code:: toml
+
+    # pyproject.toml
+
+    [tool.setuptools_scm]
+    write_to = "pkg/_version.py"
+
+
+``setup.py`` usage (deprecated)
+-------------------------------
+
+.. warning::
+
+    ``setup_requires`` has been deprecated in favor of ``pyproject.toml``
+
+The following settings are considered legacy behavior and
+superseded by the ``pyproject.toml`` usage, but for maximal
+compatibility, projects may also supply the configuration in
+this older form.
+
+To use ``setuptools_scm`` just modify your project's ``setup.py`` file
+like this:
+
+* Add ``setuptools_scm`` to the ``setup_requires`` parameter.
+* Add the ``use_scm_version`` parameter and set it to ``True``.
+
+For example:
+
+.. code:: python
+
+    from setuptools import setup
+    setup(
+        ...,
+        use_scm_version=True,
+        setup_requires=['setuptools_scm'],
+        ...,
+    )
+
+Arguments to ``get_version()`` (see below) may be passed as a dictionary to
+``use_scm_version``. For example:
+
+.. code:: python
+
+    from setuptools import setup
+    setup(
+        ...,
+        use_scm_version = {
+            "root": "..",
+            "relative_to": __file__,
+            "local_scheme": "node-and-timestamp"
+        },
+        setup_requires=['setuptools_scm'],
+        ...,
+    )
+
+You can confirm the version number locally via ``setup.py``:
+
+.. code-block:: shell
+
+    $ python setup.py --version
+
+.. note::
+
+    If you see unusual version numbers for packages but ``python setup.py
+    --version`` reports the expected version number, ensure ``[egg_info]`` is
+    not defined in ``setup.cfg``.
+
+
+``setup.cfg`` usage (deprecated)
+--------------------------------
+
+As ``setup_requires`` is deprecated in favour of ``pyproject.toml``,
+usage in ``setup.cfg`` is considered deprecated as well;
+please use ``pyproject.toml`` whenever possible.
+
+Programmatic usage
+------------------
+
+In order to use ``setuptools_scm`` from code that is one directory deeper
+than the project's root, you can use:
+
+.. code:: python
+
+    from setuptools_scm import get_version
+    version = get_version(root='..', relative_to=__file__)
+
+See `setup.py Usage (deprecated)`_ above for how to use this within ``setup.py``.
+
+
+Retrieving package version at runtime
+-------------------------------------
+
+If you have opted not to hardcode the version number inside the package,
+you can retrieve it at runtime from PEP-0566_ metadata using
+``importlib.metadata`` from the standard library (added in Python 3.8)
+or the `importlib_metadata`_ backport:
+
+.. code:: python
+
+    from importlib.metadata import version, PackageNotFoundError
+
+    try:
+        __version__ = version("package-name")
+    except PackageNotFoundError:
+        # package is not installed
+        pass
+
+Alternatively, you can use ``pkg_resources``, which is included in
+``setuptools`` (but has a significant runtime cost):
+
+.. code:: python
+
+    from pkg_resources import get_distribution, DistributionNotFound
+
+    try:
+        __version__ = get_distribution("package-name").version
+    except DistributionNotFound:
+        # package is not installed
+        pass
+
+However, this does place a runtime dependency on ``setuptools`` and can add up to
+a few hundred milliseconds of overhead to the package import time.
+
+.. _PEP-0566: https://www.python.org/dev/peps/pep-0566/
+.. _importlib_metadata: https://pypi.org/project/importlib-metadata/
+
+
+Usage from Sphinx
+-----------------
+
+Using ``setuptools_scm`` from Sphinx itself is discouraged;
+instead, use ``importlib.metadata`` after editable/real installation:
+
+.. code:: python
+
+    # contents of docs/conf.py
+    from importlib.metadata import version
+    release = version('myproject')
+    # for example take major/minor
+    version = '.'.join(release.split('.')[:2])
+
+The underlying reason is that services like *Read the Docs* sometimes change
+the working directory for good reasons, and using the installed metadata
+avoids depending on needlessly volatile data there.
+
+Notable Plugins
+---------------
+
+`setuptools_scm_git_archive <https://pypi.org/project/setuptools-scm-git-archive/>`_
+  Provides partial support for obtaining versions from git archives that
+  belong to tagged versions. The only reason for not including it in
+  ``setuptools_scm`` itself is Git/GitHub not supporting sufficient metadata
+  for untagged/followup commits, which prevents a consistent UX.
+
+
+Default versioning scheme
+-------------------------
+
+In the standard configuration ``setuptools_scm`` takes a look at three things:
+
+1. latest tag (with a version number)
+2. the distance to this tag (e.g. number of revisions since latest tag)
+3. workdir state (e.g. uncommitted changes since latest tag)
+
+and uses roughly the following logic to render the version:
+
+no distance and clean:
+    ``{tag}``
+distance and clean:
+    ``{next_version}.dev{distance}+{scm letter}{revision hash}``
+no distance and not clean:
+    ``{tag}+dYYYYMMDD``
+distance and not clean:
+    ``{next_version}.dev{distance}+{scm letter}{revision hash}.dYYYYMMDD``
+
+The next version is calculated by adding ``1`` to the last numeric component of
+the tag. (An illustrative sketch of this logic appears further below, after the
+``MANIFEST.in`` notes.)
+
+
+For Git projects, the version relies on `git describe <https://git-scm.com/docs/git-describe>`_,
+so you will see an additional ``g`` prepended to the ``{revision hash}``.
+
+Semantic Versioning (SemVer)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Due to the default behavior it's necessary to always include a
+patch version (the ``3`` in ``1.2.3``), or else the automatic guessing
+will increment the wrong part of the SemVer (e.g. tag ``2.0`` results in
+``2.1.devX`` instead of ``2.0.1.devX``). So please make sure to tag
+accordingly.
+
+.. note::
+
+    Future versions of ``setuptools_scm`` will switch to `SemVer
+    <https://semver.org/>`_ by default, hiding the old behavior as a
+    configurable option.
+
+
+Builtin mechanisms for obtaining version numbers
+------------------------------------------------
+
+1. the SCM itself (git/hg)
+2. ``.hg_archival`` files (mercurial archives)
+3. ``PKG-INFO``
+
+.. note::
+
+    Git archives are not supported due to Git shortcomings
+
+
+File finders hook makes most of MANIFEST.in unnecessary
+-------------------------------------------------------
+
+``setuptools_scm`` implements a `file_finders
+`_
+entry point which returns all files tracked by your SCM. This eliminates
+the need for a manually constructed ``MANIFEST.in`` in most cases where this
+would be required when not using ``setuptools_scm``, namely:
+
+* To ensure all relevant files are packaged when running the ``sdist`` command.
+
+* When using `include_package_data `_
+  to include package data as part of the ``build`` or ``bdist_wheel``.
+
+``MANIFEST.in`` may still be used: anything defined there overrides the hook.
+This is mostly useful to exclude files tracked in your SCM from packages,
+although in principle it can be used to explicitly include non-tracked files
+too.
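+
+For illustration only, the rendering logic of the "Default versioning scheme"
+table above can be sketched in plain Python roughly as follows. This is not
+the actual ``setuptools_scm`` implementation; ``next_version``, ``scm_letter``
+and ``revision_hash`` stand in for values derived from the repository:
+
+.. code:: python
+
+    from datetime import date
+
+    def render_version(tag, distance, dirty,
+                       next_version, scm_letter, revision_hash):
+        # mirrors the four rows of the versioning table above
+        today = date.today().strftime("%Y%m%d")
+        if not distance and not dirty:
+            return tag
+        if distance and not dirty:
+            return f"{next_version}.dev{distance}+{scm_letter}{revision_hash}"
+        if not distance and dirty:
+            return f"{tag}+d{today}"
+        return f"{next_version}.dev{distance}+{scm_letter}{revision_hash}.d{today}"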
+
+
+Configuration parameters
+------------------------
+
+In order to configure the way ``use_scm_version`` works you can provide
+a mapping with options instead of a boolean value.
+
+The currently supported configuration keys are:
+
+:root:
+    Relative path to cwd, used for finding the SCM root; defaults to ``.``
+
+:version_scheme:
+    Configures how the version number is constructed; either an
+    entrypoint name or a callable.
+
+:local_scheme:
+    Configures how the local component of the version is constructed; either an
+    entrypoint name or a callable.
+
+:write_to:
+    A path to a file that gets replaced with a file containing the current
+    version. It is ideal for creating a ``_version.py`` file within the
+    package, typically used to avoid using `pkg_resources.get_distribution`
+    (which adds some overhead).
+
+    .. warning::
+
+        Only files with :code:`.py` and :code:`.txt` extensions have builtin
+        templates; for other file types it is necessary to provide
+        :code:`write_to_template`.
+
+:write_to_template:
+    A new-style format string that is given the current version as
+    the ``version`` keyword argument for formatting.
+
+:relative_to:
+    A file from which the root can be resolved.
+    Typically called by a script or module that is not in the root of the
+    repository to point ``setuptools_scm`` at the root of the repository by
+    supplying ``__file__``.
+
+:tag_regex:
+    A Python regex string to extract the version part from any SCM tag.
+    The regex needs to contain either a single match group, or a group
+    named ``version``, that captures the actual version information.
+
+    Defaults to the value of ``setuptools_scm.config.DEFAULT_TAG_REGEX``
+    (see `config.py `_).
+
+:parentdir_prefix_version:
+    If the normal methods for detecting the version (SCM version,
+    sdist metadata) fail, and the parent directory name starts with
+    ``parentdir_prefix_version``, then this prefix is stripped and the rest of
+    the parent directory name is matched with ``tag_regex`` to get a version
+    string. If this parameter is unset (the default), then this fallback is
+    not used.
+
+    This is intended to cover GitHub's "release tarballs", which extract into
+    directories named ``projectname-tag/`` (in which case
+    ``parentdir_prefix_version`` can be set e.g. to ``projectname-``).
+
+:fallback_version:
+    A version string that will be used if no other method for detecting the
+    version worked (e.g., when using a tarball with no metadata). If this is
+    unset (the default), setuptools_scm will error if it fails to detect the
+    version.
+
+:parse:
+    A function that will be used instead of the discovered SCM for parsing the
+    version.
+    Use with caution; this is a function for advanced use, and you should be
+    familiar with the ``setuptools_scm`` internals to use it.
+
+:git_describe_command:
+    This command will be used instead of the default ``git describe`` command.
+    Use with caution; this is an advanced feature, and you should be
+    familiar with the ``setuptools_scm`` internals to use it.
+
+    Defaults to the value set by ``setuptools_scm.git.DEFAULT_DESCRIBE``
+    (see `git.py `_).
+
+:normalize:
+    A boolean flag indicating if the version string should be normalized.
+    Defaults to ``True``. Setting this to ``False`` is equivalent to setting
+    ``version_cls`` to ``setuptools_scm.version.NonNormalizedVersion``.
+
+:version_cls:
+    An optional class used to parse, verify and possibly normalize the version
+    string.
+    Its constructor should receive a single string argument, and its
+    ``str`` should return the normalized version string to use.
+    This option can also receive a class qualified name as a string.
+
+    This defaults to ``packaging.version.Version`` if available. If
+    ``packaging`` is not installed, ``pkg_resources.packaging.version.Version``
+    is used. Note that it is known to modify git release candidate schemes.
+
+    The ``setuptools_scm.NonNormalizedVersion`` convenience class is
+    provided to disable the normalization step done by
+    ``packaging.version.Version``. If this is used while ``setuptools_scm``
+    is integrated in a setuptools packaging process, the non-normalized
+    version number will appear in all files (see ``write_to``) BUT note
+    that setuptools will still normalize it to create the final distribution,
+    so as to stay compliant with the python packaging standards.
+
+
+To use ``setuptools_scm`` in other Python code you can use the ``get_version``
+function:
+
+.. code:: python
+
+    from setuptools_scm import get_version
+    my_version = get_version()
+
+It optionally accepts the keys of the ``use_scm_version`` parameter as
+keyword arguments.
+
+Example configuration in ``setup.py`` format:
+
+.. code:: python
+
+    from setuptools import setup
+
+    setup(
+        use_scm_version={
+            'write_to': '_version.py',
+            'write_to_template': '__version__ = "{version}"',
+            'tag_regex': r'^(?P<prefix>v)?(?P<version>[^\+]+)(?P<suffix>.*)?$',
+        }
+    )
+
+Environment variables
+---------------------
+
+:SETUPTOOLS_SCM_PRETEND_VERSION:
+    when defined and not empty,
+    it is used as the primary source for the version number,
+    in which case it will be an unparsed string
+
+
+:SETUPTOOLS_SCM_PRETEND_VERSION_FOR_${UPPERCASED_DIST_NAME}:
+    when defined and not empty,
+    it is used as the primary source for the version number,
+    in which case it will be an unparsed string
+
+    it takes precedence over ``SETUPTOOLS_SCM_PRETEND_VERSION``
+
+
+:SETUPTOOLS_SCM_DEBUG:
+    when defined and not empty,
+    a lot of debug information will be printed while ``setuptools_scm``
+    operates
+
+:SOURCE_DATE_EPOCH:
+    when defined, used as the timestamp from which the
+    ``node-and-date`` and ``node-and-timestamp`` local parts are
+    derived, otherwise the current time is used
+    (https://reproducible-builds.org/docs/source-date-epoch/)
+
+
+:SETUPTOOLS_SCM_IGNORE_VCS_ROOTS:
+    when defined, an ``os.pathsep``-separated list
+    of directory names to ignore for root finding
+
+Extending setuptools_scm
+------------------------
+
+``setuptools_scm`` ships with a few ``setuptools`` entrypoint-based hooks to
+extend its default capabilities.
+
+Adding a new SCM
+~~~~~~~~~~~~~~~~
+
+``setuptools_scm`` provides two entrypoints for adding new SCMs:
+
+``setuptools_scm.parse_scm``
+    A function used to parse the metadata of the current workdir
+    using the name of the control directory/file of your SCM as the
+    entrypoint's name. E.g. for the built-in entrypoint for git the
+    entrypoint is named ``.git`` and references ``setuptools_scm.git:parse``
+
+    The return value MUST be a ``setuptools_scm.version.ScmVersion`` instance
+    created by the function ``setuptools_scm.version:meta``.
+
+``setuptools_scm.files_command``
+    Either a string containing a shell command that prints all SCM managed
+    files in its current working directory, or a callable that, given a
+    pathname, will return that list.
+
+    Also use the name of your SCM control directory as the name of the entrypoint.
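+
+As a rough sketch of such a plugin (the SCM name ``.fictscm``, the module name,
+and all returned values are made up for illustration; only
+``setuptools_scm.version.meta`` comes from the description above):
+
+.. code:: python
+
+    # fictscm_plugin.py -- hypothetical module registered under the
+    # ``setuptools_scm.parse_scm`` entrypoint with the name ``.fictscm``
+    from setuptools_scm.version import meta
+
+    def parse(root, config=None):
+        # a real plugin would inspect the SCM metadata found in ``root``
+        # to derive the latest tag, distance, and workdir state
+        return meta(tag="1.2.3", distance=4, dirty=False, config=config)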
+
+Version number construction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``setuptools_scm.version_scheme``
+    Configures how the version number is constructed given a
+    ``setuptools_scm.version.ScmVersion`` instance and should return a string
+    representing the version.
+
+    Available implementations:
+
+    :guess-next-dev: Automatically guesses the next development version (default).
+        Guesses the upcoming release by incrementing the pre-release segment if present,
+        otherwise by incrementing the micro segment. Then appends :code:`.devN`.
+        In case the tag ends with ``.dev0`` the version is not bumped
+        and custom ``.devN`` versions will trigger an error.
+    :post-release: generates post-release versions (adds :code:`.postN`)
+    :python-simplified-semver: Basic semantic versioning. Guesses the upcoming release
+        by incrementing the minor segment and setting the micro segment to zero if the
+        current branch contains the string ``'feature'``, otherwise by incrementing the
+        micro version. Then appends :code:`.devN`. Not compatible with pre-releases.
+    :release-branch-semver: Semantic versioning for projects with release branches. The
+        same as ``guess-next-dev`` (incrementing the pre-release or micro segment) if on
+        a release branch: a branch whose name (ignoring namespace) parses as a version
+        that matches the most recent tag up to the minor segment. Otherwise if on a
+        non-release branch, increments the minor segment and sets the micro segment to
+        zero, then appends :code:`.devN`.
+    :no-guess-dev: Does no next version guessing, just adds :code:`.post1.devN`
+
+``setuptools_scm.local_scheme``
+    Configures how the local part of a version is rendered given a
+    ``setuptools_scm.version.ScmVersion`` instance and should return a string
+    representing the local version.
+    Dates and times are in Coordinated Universal Time (UTC), because as part
+    of the version, they should be location independent.
+
+    Available implementations:
+
+    :node-and-date: adds the node on dev versions and the date on dirty
+        workdir (default)
+    :node-and-timestamp: like ``node-and-date`` but with a timestamp of
+        the form ``{:%Y%m%d%H%M%S}`` instead
+    :dirty-tag: adds ``+dirty`` if the current workdir has changes
+    :no-local-version: omits local version, useful e.g. because pypi does
+        not support it
+
+
+Importing in ``setup.py``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To support usage in ``setup.py``, passing a callable into ``use_scm_version``
+is supported.
+
+Within that callable, ``setuptools_scm`` is available for import.
+The callable must return the configuration.
+
+
+.. code:: python
+
+    # content of setup.py
+    import setuptools
+
+    def myversion():
+        from setuptools_scm.version import get_local_dirty_tag
+        def clean_scheme(version):
+            return get_local_dirty_tag(version) if version.dirty else '+clean'
+
+        return {'local_scheme': clean_scheme}
+
+    setuptools.setup(
+        ...,
+        use_scm_version=myversion,
+        ...
+    )
+
+
+Note on testing non-installed versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+While the general advice is to test against an installed version,
+some environments require testing prior to install:
+
+.. code::
+
+    $ python setup.py egg_info
+    $ PYTHONPATH=$PWD:$PWD/src pytest
+
+
+Interaction with Enterprise Distributions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some enterprise distributions like RHEL7 and others
+ship rather old setuptools versions due to various release management details.
+
+In those cases it is typically possible to build by using an sdist against ``setuptools_scm<2.0``.
+As those old setuptools versions lack sensible types for versions,
+modern setuptools_scm is unable to support them sensibly.
+
+If the project you need to build cannot be patched to use an old
+``setuptools_scm``, it is still possible to install a more recent version of
+setuptools in order to handle the build, and/or to install the package by
+using wheels or eggs.
+
+
+
+Code of Conduct
+---------------
+
+Everyone interacting in the ``setuptools_scm`` project's codebases, issue
+trackers, chat rooms, and mailing lists is expected to follow the
+`PSF Code of Conduct`_.
+
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
+
+Security Contact
+================
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
+
+
diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD
new file mode 100644
index 000000000..d148bc7f0
--- /dev/null
+++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD
@@ -0,0 +1,23 @@
+setuptools_scm/__init__.py,sha256=zWUSg4yZvzIQ6F5s6kLhYoqLyeP70TaW2ZWTeu0Upqg,6274
+setuptools_scm/__main__.py,sha256=N1ovM8yVFiMh55m0JxeKpyTxky9RX_jsMNY8Bd6TUsw,295
+setuptools_scm/_version_cls.py,sha256=OqpnIzcegf4mPaiMy6QLHflwoxiApI8Qo0zkZe2fom8,1428
+setuptools_scm/config.py,sha256=jqMkPKLhagFfDkZc7WxcNBZoq9R3FTFmW9VDTivKYqc,6733
+setuptools_scm/discover.py,sha256=rncA7Go947oa3PRoMA1306GRXxd4Q23DRFvdbXBQT34,1557
+setuptools_scm/file_finder.py,sha256=JHCd6G6m3Df4iwUgszgylKfYNDEJWM2v8v6RuldfiY8,2554
+setuptools_scm/file_finder_git.py,sha256=fyTvB3qkYvrIv_K6j0V7Pxo3--MmW6qHRnoN4y50XjM,3244
+setuptools_scm/file_finder_hg.py,sha256=v03QeJOnHJsIYR-JCy-4ntTthg0zzDGhvmFMMKGz3l0,1492
+setuptools_scm/git.py,sha256=PrtlB917DAKSrahkgzx0TiYZq1QN1MkiHmTO0g4wskU,6204
+setuptools_scm/hacks.py,sha256=Y1tBCq3PsrovBSxgQBouEHW_iW7N_E5ADN7w0OiHrr8,1316
+setuptools_scm/hg.py,sha256=XZQQfIsQC8o6r_S-vaaT4U-TSkOQkYyxEbk5dGGQhkQ,5074
+setuptools_scm/hg_git.py,sha256=QnFvA2WcykTaa3p8sE33_IIPo-RJQ-sr9xYEvYAJHsk,3499
+setuptools_scm/integration.py,sha256=WxSJcsa4oE76wFZoJU3fJEpLFFKVWjRe8a1c-y7tlWM,2732
+setuptools_scm/scm_workdir.py,sha256=k0w7Cct1cHTnFM3FWY0gNg1i8RKbDUNJf6YjABHnKBk,322
+setuptools_scm/utils.py,sha256=32_SlZLAXc9KqUEOBExvXhU2fDyDk_A0DvmQ-4AMr2s,3963
+setuptools_scm/version.py,sha256=NqB0rGfmEgVkUc-m7nPyMuUb7V7fBVMqtmYGkVBcXWs,13818
+setuptools_scm-6.3.2.dist-info/LICENSE,sha256=iYB6zyMJvShfAzQE7nhYFgLzzZuBmhasLw5fYP9KRz4,1023
+setuptools_scm-6.3.2.dist-info/METADATA,sha256=tZATPwhkHQ_8bRhfJaCI2zbgSd3TiMdnwCD_JODppKs,22252
+setuptools_scm-6.3.2.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+setuptools_scm-6.3.2.dist-info/entry_points.txt,sha256=LzLFBv9B2emlz6AljQ8nhREGOeg8BUZAZq0am6uY9j4,1440
+setuptools_scm-6.3.2.dist-info/top_level.txt,sha256=kiu-91q3_rJLUoc2wl8_lC4cIlpgtgdD_4NaChF4hOA,15
+setuptools_scm-6.3.2.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+setuptools_scm-6.3.2.dist-info/RECORD,,
diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL
new file mode 100644
index 000000000..5bad85fdc
--- /dev/null
+++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt
b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt new file mode 100644 index 000000000..88df81355 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt @@ -0,0 +1,37 @@ +[distutils.setup_keywords] +use_scm_version = setuptools_scm.integration:version_keyword + +[setuptools.file_finders] +setuptools_scm = setuptools_scm.integration:find_files + +[setuptools.finalize_distribution_options] +setuptools_scm = setuptools_scm.integration:infer_version + +[setuptools_scm.files_command] +.git = setuptools_scm.file_finder_git:git_find_files +.hg = setuptools_scm.file_finder_hg:hg_find_files + +[setuptools_scm.local_scheme] +dirty-tag = setuptools_scm.version:get_local_dirty_tag +no-local-version = setuptools_scm.version:get_no_local_node +node-and-date = setuptools_scm.version:get_local_node_and_date +node-and-timestamp = setuptools_scm.version:get_local_node_and_timestamp + +[setuptools_scm.parse_scm] +.git = setuptools_scm.git:parse +.hg = setuptools_scm.hg:parse + +[setuptools_scm.parse_scm_fallback] +.hg_archival.txt = setuptools_scm.hg:parse_archival +PKG-INFO = setuptools_scm.hacks:parse_pkginfo +pip-egg-info = setuptools_scm.hacks:parse_pip_egg_info +setup.py = setuptools_scm.hacks:fallback_version + +[setuptools_scm.version_scheme] +calver-by-date = setuptools_scm.version:calver_by_date +guess-next-dev = setuptools_scm.version:guess_next_dev_version +no-guess-dev = setuptools_scm.version:no_guess_dev_version +post-release = setuptools_scm.version:postrelease_version +python-simplified-semver = setuptools_scm.version:simplified_semver_version +release-branch-semver = setuptools_scm.version:release_branch_semver_version + diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt new file mode 100644 index 000000000..bd01a6c23 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt @@ -0,0 +1,6 @@ +packaging>=20.0 +setuptools +tomli>=1.0.0 + +[toml] +setuptools>=42 diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt new file mode 100644 index 000000000..cba8d8860 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt @@ -0,0 +1 @@ +setuptools_scm diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe @@ -0,0 +1 @@ + diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py new file mode 100644 index 000000000..b4f86eae3 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py @@ -0,0 +1,212 @@ +""" +:copyright: 2010-2015 by Ronny Pfannschmidt +:license: MIT +""" +import os +import warnings + +from ._version_cls import NonNormalizedVersion +from ._version_cls import Version +from .config import Configuration +from .config import DEFAULT_LOCAL_SCHEME +from .config import DEFAULT_TAG_REGEX +from .config import DEFAULT_VERSION_SCHEME +from .discover import iter_matching_entrypoints +from .utils import function_has_arg +from .utils import trace +from .version import format_version +from .version import meta + +PRETEND_KEY = "SETUPTOOLS_SCM_PRETEND_VERSION" +PRETEND_KEY_NAMED = PRETEND_KEY + "_FOR_{name}" + +TEMPLATES = { + ".py": """\ +# coding: utf-8 +# 
file generated by setuptools_scm +# don't change, don't track in version control +version = {version!r} +version_tuple = {version_tuple!r} +""", + ".txt": "{version}", +} + + +def version_from_scm(root): + warnings.warn( + "version_from_scm is deprecated, please use get_version", + category=DeprecationWarning, + stacklevel=2, + ) + config = Configuration(root=root) + # TODO: Is it API? + return _version_from_entrypoints(config) + + +def _call_entrypoint_fn(root, config, fn): + if function_has_arg(fn, "config"): + return fn(root, config=config) + else: + warnings.warn( + f"parse function {fn.__module__}.{fn.__name__}" + " is required to provide a named argument" + " 'config'; setuptools_scm>=8.0 will remove support.", + category=DeprecationWarning, + stacklevel=2, + ) + return fn(root) + + +def _version_from_entrypoints(config: Configuration, fallback=False): + if fallback: + entrypoint = "setuptools_scm.parse_scm_fallback" + root = config.fallback_root + else: + entrypoint = "setuptools_scm.parse_scm" + root = config.absolute_root + + for ep in iter_matching_entrypoints(root, entrypoint, config): + version = _call_entrypoint_fn(root, config, ep.load()) + trace(ep, version) + if version: + return version + + +def dump_version(root, version, write_to, template=None): + assert isinstance(version, str) + if not write_to: + return + target = os.path.normpath(os.path.join(root, write_to)) + ext = os.path.splitext(target)[1] + template = template or TEMPLATES.get(ext) + + if template is None: + raise ValueError( + "bad file format: '{}' (of {}) \nonly *.txt and *.py are supported".format( + os.path.splitext(target)[1], target + ) + ) + + parsed_version = Version(version) + version_fields = parsed_version.release + if parsed_version.dev is not None: + version_fields += (f"dev{parsed_version.dev}",) + if parsed_version.local is not None: + version_fields += (parsed_version.local,) + + with open(target, "w") as fp: + fp.write(template.format(version=version, version_tuple=tuple(version_fields))) + + +def _do_parse(config): + + trace("dist name:", config.dist_name) + if config.dist_name is not None: + pretended = os.environ.get( + PRETEND_KEY_NAMED.format(name=config.dist_name.upper()) + ) + else: + pretended = None + + if pretended is None: + pretended = os.environ.get(PRETEND_KEY) + + if pretended: + # we use meta here since the pretended version + # must adhere to the pep to begin with + return meta(tag=pretended, preformatted=True, config=config) + + if config.parse: + parse_result = _call_entrypoint_fn(config.absolute_root, config, config.parse) + if isinstance(parse_result, str): + raise TypeError( + "version parse result was a string\nplease return a parsed version" + ) + version = parse_result or _version_from_entrypoints(config, fallback=True) + else: + # include fallbacks after dropping them from the main entrypoint + version = _version_from_entrypoints(config) or _version_from_entrypoints( + config, fallback=True + ) + + if version: + return version + + raise LookupError( + "setuptools-scm was unable to detect version for %r.\n\n" + "Make sure you're either building from a fully intact git repository " + "or PyPI tarballs. 
Most other sources (such as GitHub's tarballs, a " + "git checkout without the .git folder) don't contain the necessary " + "metadata and will not work.\n\n" + "For example, if you're using pip, instead of " + "https://github.com/user/proj/archive/master.zip " + "use git+https://github.com/user/proj.git#egg=proj" % config.absolute_root + ) + + +def get_version( + root=".", + version_scheme=DEFAULT_VERSION_SCHEME, + local_scheme=DEFAULT_LOCAL_SCHEME, + write_to=None, + write_to_template=None, + relative_to=None, + tag_regex=DEFAULT_TAG_REGEX, + parentdir_prefix_version=None, + fallback_version=None, + fallback_root=".", + parse=None, + git_describe_command=None, + dist_name=None, + version_cls=None, + normalize=True, + search_parent_directories=False, +): + """ + If supplied, relative_to should be a file from which root may + be resolved. Typically called by a script or module that is not + in the root of the repository to direct setuptools_scm to the + root of the repository by supplying ``__file__``. + """ + + config = Configuration(**locals()) + return _get_version(config) + + +def _get_version(config): + parsed_version = _do_parse(config) + + if parsed_version: + version_string = format_version( + parsed_version, + version_scheme=config.version_scheme, + local_scheme=config.local_scheme, + ) + dump_version( + root=config.root, + version=version_string, + write_to=config.write_to, + template=config.write_to_template, + ) + + return version_string + + +# Public API +__all__ = [ + "get_version", + "dump_version", + "version_from_scm", + "Configuration", + "DEFAULT_VERSION_SCHEME", + "DEFAULT_LOCAL_SCHEME", + "DEFAULT_TAG_REGEX", + "Version", + "NonNormalizedVersion", + # TODO: are the symbols below part of public API ? + "function_has_arg", + "trace", + "format_version", + "meta", + "iter_matching_entrypoints", +] diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py new file mode 100644 index 000000000..f3377b055 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py @@ -0,0 +1,15 @@ +import sys + +from setuptools_scm import get_version +from setuptools_scm.integration import find_files + + +def main() -> None: + print("Guessed Version", get_version()) + if "ls" in sys.argv: + for fname in find_files("."): + print(fname) + + +if __name__ == "__main__": + main() diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py new file mode 100644 index 000000000..0cefb2679 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py @@ -0,0 +1,49 @@ +try: + from packaging.version import Version + + assert hasattr(Version, "release") +except ImportError: + from pkg_resources._vendor.packaging.version import Version as SetuptoolsVersion + + try: + SetuptoolsVersion.release + Version = SetuptoolsVersion + except AttributeError: + + class Version(SetuptoolsVersion): # type: ignore + @property + def release(self): + return self._version.release + + @property + def dev(self): + return self._version.dev + + @property + def local(self): + return self._version.local + + +class NonNormalizedVersion(Version): + """A non-normalizing version handler. + + You can use this class to preserve version verification but skip normalization. + For example you can use this to avoid git release candidate version tags + ("1.0.0-rc1") to be normalized to "1.0.0rc1". 
Only use this if you fully + trust the version tags. + """ + + def __init__(self, version): + # parse and validate using parent + super().__init__(version) + + # store raw for str + self._raw_version = version + + def __str__(self): + # return the non-normalized version (parent returns the normalized) + return self._raw_version + + def __repr__(self): + # same pattern as parent + return f"<NonNormalizedVersion({self._raw_version!r})>" diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py new file mode 100644 index 000000000..6bcf446f3 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py @@ -0,0 +1,212 @@ +""" configuration """ +import os +import re +import warnings + +from ._version_cls import NonNormalizedVersion +from ._version_cls import Version +from .utils import trace + +DEFAULT_TAG_REGEX = r"^(?:[\w-]+-)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$" +DEFAULT_VERSION_SCHEME = "guess-next-dev" +DEFAULT_LOCAL_SCHEME = "node-and-date" + + +def _check_tag_regex(value): + if not value: + value = DEFAULT_TAG_REGEX + regex = re.compile(value) + + group_names = regex.groupindex.keys() + if regex.groups == 0 or (regex.groups > 1 and "version" not in group_names): + warnings.warn( + "Expected tag_regex to contain a single match group or a group named" + " 'version' to identify the version part of any tag." + ) + + return regex + + +def _check_absolute_root(root, relative_to): + trace("l", repr(locals())) + if relative_to: + if os.path.isabs(root) and not root.startswith(relative_to): + warnings.warn( + "absolute root path '%s' overrides relative_to '%s'" + % (root, relative_to) + ) + if os.path.isdir(relative_to): + warnings.warn( + "relative_to is expected to be a file," + " it's the directory %r\n" + "assuming the parent directory was passed" % (relative_to,) + ) + trace("dir", relative_to) + root = os.path.join(relative_to, root) + else: + trace("file", relative_to) + root = os.path.join(os.path.dirname(relative_to), root) + return os.path.abspath(root) + + +def _lazy_tomli_load(data): + from tomli import loads + + return loads(data) + + +class Configuration: + """Global configuration model""" + + def __init__( + self, + relative_to=None, + root=".", + version_scheme=DEFAULT_VERSION_SCHEME, + local_scheme=DEFAULT_LOCAL_SCHEME, + write_to=None, + write_to_template=None, + tag_regex=DEFAULT_TAG_REGEX, + parentdir_prefix_version=None, + fallback_version=None, + fallback_root=".", + parse=None, + git_describe_command=None, + dist_name=None, + version_cls=None, + normalize=True, + search_parent_directories=False, + ): + # TODO: + self._relative_to = relative_to + self._root = "."
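+ # the property setters below also derive and cache the absolute root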
+ + self.root = root + self.version_scheme = version_scheme + self.local_scheme = local_scheme + self.write_to = write_to + self.write_to_template = write_to_template + self.parentdir_prefix_version = parentdir_prefix_version + self.fallback_version = fallback_version + self.fallback_root = fallback_root + self.parse = parse + self.tag_regex = tag_regex + self.git_describe_command = git_describe_command + self.dist_name = dist_name + self.search_parent_directories = search_parent_directories + self.parent = None + + if not normalize: + # `normalize = False` means `version_cls = NonNormalizedVersion` + if version_cls is not None: + raise ValueError( + "Providing a custom `version_cls` is not permitted when " + "`normalize=False`" + ) + self.version_cls = NonNormalizedVersion + else: + # Use `version_cls` if provided, default to packaging or pkg_resources + if version_cls is None: + version_cls = Version + elif isinstance(version_cls, str): + try: + # Not sure this will work in old python + import importlib + + pkg, cls_name = version_cls.rsplit(".", 1) + version_cls_host = importlib.import_module(pkg) + version_cls = getattr(version_cls_host, cls_name) + except: # noqa + raise ValueError(f"Unable to import version_cls='{version_cls}'") + self.version_cls = version_cls + + @property + def fallback_root(self): + return self._fallback_root + + @fallback_root.setter + def fallback_root(self, value): + self._fallback_root = os.path.abspath(value) + + @property + def absolute_root(self): + return self._absolute_root + + @property + def relative_to(self): + return self._relative_to + + @relative_to.setter + def relative_to(self, value): + self._absolute_root = _check_absolute_root(self._root, value) + self._relative_to = value + trace("root", repr(self._absolute_root)) + trace("relative_to", repr(value)) + + @property + def root(self): + return self._root + + @root.setter + def root(self, value): + self._absolute_root = _check_absolute_root(value, self._relative_to) + self._root = value + trace("root", repr(self._absolute_root)) + trace("relative_to", repr(self._relative_to)) + + @property + def tag_regex(self): + return self._tag_regex + + @tag_regex.setter + def tag_regex(self, value): + self._tag_regex = _check_tag_regex(value) + + @classmethod + def from_file( + cls, + name: str = "pyproject.toml", + dist_name=None, # type: str | None + _load_toml=_lazy_tomli_load, + ): + """ + Read Configuration from pyproject.toml (or similar). + Raises exceptions when file is not found or toml is + not installed or the file has invalid format or does + not contain the [tool.setuptools_scm] section. 
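+ When ``dist_name`` is not given, it is read from the [project] table or from setup.cfg metadata.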
+ """ + + with open(name, encoding="UTF-8") as strm: + data = strm.read() + defn = _load_toml(data) + try: + section = defn.get("tool", {})["setuptools_scm"] + except LookupError as e: + raise LookupError( + f"{name} does not contain a tool.setuptools_scm section" + ) from e + if "dist_name" in section: + if dist_name is None: + dist_name = section.pop("dist_name") + else: + assert dist_name == section["dist_name"] + del section["dist_name"] + if dist_name is None: + if "project" in defn: + # minimal pep 621 support for figuring the pretend keys + dist_name = defn["project"].get("name") + if dist_name is None: + dist_name = _read_dist_name_from_setup_cfg() + + return cls(dist_name=dist_name, **section) + + +def _read_dist_name_from_setup_cfg(): + + # minimal effort to read dist_name off setup.cfg metadata + import configparser + + parser = configparser.ConfigParser() + parser.read(["setup.cfg"]) + dist_name = parser.get("metadata", "name", fallback=None) + return dist_name diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py new file mode 100644 index 000000000..f2aee17a8 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py @@ -0,0 +1,58 @@ +import os + +from .config import Configuration +from .utils import iter_entry_points +from .utils import trace + + +def walk_potential_roots(root, search_parents=True): + """ + Iterate though a path and each of its parents. + :param root: File path. + :param search_parents: If ``False`` the parents are not considered. + """ + + if not search_parents: + yield root + return + + tail = root + + while tail: + yield root + root, tail = os.path.split(root) + + +def match_entrypoint(root, name): + """ + Consider a ``root`` as entry-point. + :param root: File path. + :param name: Subdirectory name. + :return: ``True`` if a subdirectory ``name`` exits in ``root``. + """ + + if os.path.exists(os.path.join(root, name)): + if not os.path.isabs(name): + return True + trace("ignoring bad ep", name) + + return False + + +def iter_matching_entrypoints(root, entrypoint, config: Configuration): + """ + Consider different entry-points in ``root`` and optionally its parents. + :param root: File path. + :param entrypoint: Entry-point to consider. + :param config: Configuration, + read ``search_parent_directories``, write found parent to ``parent``. 
+ """ + + trace("looking for ep", entrypoint, root) + + for wd in walk_potential_roots(root, config.search_parent_directories): + for ep in iter_entry_points(entrypoint): + if match_entrypoint(wd, ep.name): + trace("found ep", ep, "in", wd) + config.parent = wd + yield ep diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py new file mode 100644 index 000000000..466602482 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py @@ -0,0 +1,70 @@ +import os + +from .utils import trace + + +def scm_find_files(path, scm_files, scm_dirs): + """ setuptools compatible file finder that follows symlinks + + - path: the root directory from which to search + - scm_files: set of scm controlled files and symlinks + (including symlinks to directories) + - scm_dirs: set of scm controlled directories + (including directories containing no scm controlled files) + + scm_files and scm_dirs must be absolute with symlinks resolved (realpath), + with normalized case (normcase) + + Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\ + adding-support-for-revision-control-systems + """ + realpath = os.path.normcase(os.path.realpath(path)) + seen = set() + res = [] + for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True): + # dirpath with symlinks resolved + realdirpath = os.path.normcase(os.path.realpath(dirpath)) + + def _link_not_in_scm(n): + fn = os.path.join(realdirpath, os.path.normcase(n)) + return os.path.islink(fn) and fn not in scm_files + + if realdirpath not in scm_dirs: + # directory not in scm, don't walk it's content + dirnames[:] = [] + continue + if os.path.islink(dirpath) and not os.path.relpath( + realdirpath, realpath + ).startswith(os.pardir): + # a symlink to a directory not outside path: + # we keep it in the result and don't walk its content + res.append(os.path.join(path, os.path.relpath(dirpath, path))) + dirnames[:] = [] + continue + if realdirpath in seen: + # symlink loop protection + dirnames[:] = [] + continue + dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)] + for filename in filenames: + if _link_not_in_scm(filename): + continue + # dirpath + filename with symlinks preserved + fullfilename = os.path.join(dirpath, filename) + if os.path.normcase(os.path.realpath(fullfilename)) in scm_files: + res.append(os.path.join(path, os.path.relpath(fullfilename, realpath))) + seen.add(realdirpath) + return res + + +def is_toplevel_acceptable(toplevel): + """ """ + if toplevel is None: + return False + + ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep) + ignored = [os.path.normcase(p) for p in ignored] + + trace(toplevel, ignored) + + return toplevel not in ignored diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py new file mode 100644 index 000000000..c6f96d8ac --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py @@ -0,0 +1,93 @@ +import logging +import os +import subprocess +import tarfile + +from .file_finder import is_toplevel_acceptable +from .file_finder import scm_find_files +from .utils import do_ex +from .utils import trace + +log = logging.getLogger(__name__) + + +def _git_toplevel(path): + try: + cwd = os.path.abspath(path or ".") + out, err, ret = do_ex(["git", "rev-parse", "HEAD"], cwd=cwd) + if ret != 0: + # BAIL if there is no 
commit + log.error("listing git files failed - pretending there aren't any") + return None + out, err, ret = do_ex( + ["git", "rev-parse", "--show-prefix"], + cwd=cwd, + ) + if ret != 0: + return None + out = out.strip()[:-1] # remove the trailing pathsep + if not out: + out = cwd + else: + # Here, ``out`` is a relative path to root of git. + # ``cwd`` is absolute path to current working directory. + # the below method removes the length of ``out`` from + # ``cwd``, which gives the git toplevel + assert cwd.replace("\\", "/").endswith(out), f"cwd={cwd!r}\nout={out!r}" + # In windows cwd contains ``\`` which should be replaced by ``/`` + # for this assertion to work. Length of string isn't changed by replace + # ``\\`` is just an escape for `\` + out = cwd[: -len(out)] + trace("find files toplevel", out) + return os.path.normcase(os.path.realpath(out.strip())) + except subprocess.CalledProcessError: + # git returned error, we are not in a git repo + return None + except OSError: + # git command not found, probably + return None + + +def _git_interpret_archive(fd, toplevel): + with tarfile.open(fileobj=fd, mode="r|*") as tf: + git_files = set() + git_dirs = {toplevel} + for member in tf.getmembers(): + name = os.path.normcase(member.name).replace("/", os.path.sep) + if member.type == tarfile.DIRTYPE: + git_dirs.add(name) + else: + git_files.add(name) + return git_files, git_dirs + + +def _git_ls_files_and_dirs(toplevel): + # use git archive instead of git ls-files to honor + # export-ignore git attribute + + cmd = ["git", "archive", "--prefix", toplevel + os.path.sep, "HEAD"] + proc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, cwd=toplevel, stderr=subprocess.DEVNULL + ) + try: + try: + return _git_interpret_archive(proc.stdout, toplevel) + finally: + # ensure we avoid resource warnings by cleaning up the process + proc.stdout.close() + proc.terminate() + except Exception: + if proc.wait() != 0: + log.error("listing git files failed - pretending there aren't any") + return (), () + + +def git_find_files(path=""): + toplevel = _git_toplevel(path) + if not is_toplevel_acceptable(toplevel): + return [] + fullpath = os.path.abspath(os.path.normpath(path)) + if not fullpath.startswith(toplevel): + trace("toplevel mismatch", toplevel, fullpath) + git_files, git_dirs = _git_ls_files_and_dirs(toplevel) + return scm_find_files(path, git_files, git_dirs) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py new file mode 100644 index 000000000..53878c6a3 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py @@ -0,0 +1,49 @@ +import os +import subprocess + +from .file_finder import is_toplevel_acceptable +from .file_finder import scm_find_files +from .utils import do_ex + + +def _hg_toplevel(path): + try: + with open(os.devnull, "wb") as devnull: + out = subprocess.check_output( + ["hg", "root"], + cwd=(path or "."), + universal_newlines=True, + stderr=devnull, + ) + return os.path.normcase(os.path.realpath(out.strip())) + except subprocess.CalledProcessError: + # hg returned error, we are not in a mercurial repo + return None + except OSError: + # hg command not found, probably + return None + + +def _hg_ls_files_and_dirs(toplevel): + hg_files = set() + hg_dirs = {toplevel} + out, err, ret = do_ex(["hg", "files"], cwd=toplevel) + if ret: + return (), () + for name in out.splitlines(): + name = os.path.normcase(name).replace("/", os.path.sep) + fullname =
os.path.join(toplevel, name) + hg_files.add(fullname) + dirname = os.path.dirname(fullname) + while len(dirname) > len(toplevel) and dirname not in hg_dirs: + hg_dirs.add(dirname) + dirname = os.path.dirname(dirname) + return hg_files, hg_dirs + + +def hg_find_files(path=""): + toplevel = _hg_toplevel(path) + if not is_toplevel_acceptable(toplevel): + return [] + hg_files, hg_dirs = _hg_ls_files_and_dirs(toplevel) + return scm_find_files(path, hg_files, hg_dirs) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py new file mode 100644 index 000000000..22e870c41 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py @@ -0,0 +1,220 @@ +import os +import warnings +from datetime import date +from datetime import datetime +from os.path import isfile +from os.path import join +from os.path import samefile + +from .config import Configuration +from .scm_workdir import Workdir +from .utils import do_ex +from .utils import require_command +from .utils import trace +from .version import meta + +DEFAULT_DESCRIBE = "git describe --dirty --tags --long --match *[0-9]*" + + +class GitWorkdir(Workdir): + """experimental, may change at any time""" + + COMMAND = "git" + + @classmethod + def from_potential_worktree(cls, wd): + require_command(cls.COMMAND) + wd = os.path.abspath(wd) + real_wd, _, ret = do_ex("git rev-parse --show-prefix", wd) + real_wd = real_wd[:-1] # remove the trailing pathsep + if ret: + return + if not real_wd: + real_wd = wd + else: + assert wd.replace("\\", "/").endswith(real_wd) + # In windows wd contains ``\`` which should be replaced by ``/`` + # for this assertion to work. Length of string isn't changed by replace + # ``\\`` is just and escape for `\` + real_wd = wd[: -len(real_wd)] + trace("real root", real_wd) + if not samefile(real_wd, wd): + return + + return cls(real_wd) + + def is_dirty(self): + out, _, _ = self.do_ex("git status --porcelain --untracked-files=no") + return bool(out) + + def get_branch(self): + branch, err, ret = self.do_ex("git rev-parse --abbrev-ref HEAD") + if ret: + trace("branch err", branch, err, ret) + branch, err, ret = self.do_ex("git symbolic-ref --short HEAD") + if ret: + trace("branch err (symbolic-ref)", branch, err, ret) + branch = None + return branch + + def get_head_date(self): + timestamp, err, ret = self.do_ex("git log -n 1 HEAD --format=%cI") + if ret: + trace("timestamp err", timestamp, err, ret) + return + # TODO, when dropping python3.6 use fromiso + date_part = timestamp.split("T")[0] + if "%c" in date_part: + trace("git too old -> timestamp is ", timestamp) + return None + return datetime.strptime(date_part, r"%Y-%m-%d").date() + + def is_shallow(self): + return isfile(join(self.path, ".git/shallow")) + + def fetch_shallow(self): + self.do_ex("git fetch --unshallow") + + def node(self): + node, _, ret = self.do_ex("git rev-parse --verify --quiet HEAD") + if not ret: + return node[:7] + + def count_all_nodes(self): + revs, _, _ = self.do_ex("git rev-list HEAD") + return revs.count("\n") + 1 + + def default_describe(self): + return self.do_ex(DEFAULT_DESCRIBE) + + +def warn_on_shallow(wd): + """experimental, may change at any time""" + if wd.is_shallow(): + warnings.warn(f'"{wd.path}" is shallow and may cause errors') + + +def fetch_on_shallow(wd): + """experimental, may change at any time""" + if wd.is_shallow(): + warnings.warn(f'"{wd.path}" was shallow, git fetch was used to rectify') + wd.fetch_shallow() + + +def 
fail_on_shallow(wd): + """experimental, may change at any time""" + if wd.is_shallow(): + raise ValueError( + f'{wd.path} is shallow, please correct with "git fetch --unshallow"' + ) + + +def get_working_directory(config): + """ + Return the working directory (``GitWorkdir``). + """ + + if config.parent: + return GitWorkdir.from_potential_worktree(config.parent) + + if config.search_parent_directories: + return search_parent(config.absolute_root) + + return GitWorkdir.from_potential_worktree(config.absolute_root) + + +def parse(root, describe_command=None, pre_parse=warn_on_shallow, config=None): + """ + :param pre_parse: experimental pre_parse action, may change at any time + """ + if not config: + config = Configuration(root=root) + + wd = get_working_directory(config) + if wd: + return _git_parse_inner( + config, wd, describe_command=describe_command, pre_parse=pre_parse + ) + + +def _git_parse_inner(config, wd, pre_parse=None, describe_command=None): + if pre_parse: + pre_parse(wd) + + if config.git_describe_command is not None: + describe_command = config.git_describe_command + + if describe_command is not None: + out, _, ret = wd.do_ex(describe_command) + else: + out, _, ret = wd.default_describe() + + if ret == 0: + tag, distance, node, dirty = _git_parse_describe(out) + if distance == 0 and not dirty: + distance = None + else: + # If 'git git_describe_command' failed, try to get the information otherwise. + tag = "0.0" + node = wd.node() + if node is None: + distance = 0 + else: + distance = wd.count_all_nodes() + node = "g" + node + dirty = wd.is_dirty() + + branch = wd.get_branch() + node_date = wd.get_head_date() or date.today() + + return meta( + tag, + branch=branch, + node=node, + node_date=node_date, + distance=distance, + dirty=dirty, + config=config, + ) + + +def _git_parse_describe(describe_output): + # 'describe_output' looks e.g. like 'v1.5.0-0-g4060507' or + # 'v1.15.1rc1-37-g9bd1298-dirty'. + + if describe_output.endswith("-dirty"): + dirty = True + describe_output = describe_output[:-6] + else: + dirty = False + + tag, number, node = describe_output.rsplit("-", 2) + number = int(number) + return tag, number, node, dirty + + +def search_parent(dirname): + """ + Walk up the path to find the `.git` directory. + :param dirname: Directory from which to start searching. 
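+ :return: a ``GitWorkdir`` instance if a worktree is found, otherwise ``None``.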
+ """ + + # Code based on: + # https://github.com/gitpython-developers/GitPython/blob/main/git/repo/base.py + + curpath = os.path.abspath(dirname) + + while curpath: + + try: + wd = GitWorkdir.from_potential_worktree(curpath) + except Exception: + wd = None + + if wd is not None: + return wd + + curpath, tail = os.path.split(curpath) + + if not tail: + return None diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py new file mode 100644 index 000000000..849f21ffe --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py @@ -0,0 +1,40 @@ +import os + +from .utils import data_from_mime +from .utils import trace +from .version import meta +from .version import tag_to_version + + +def parse_pkginfo(root, config=None): + + pkginfo = os.path.join(root, "PKG-INFO") + trace("pkginfo", pkginfo) + data = data_from_mime(pkginfo) + version = data.get("Version") + if version != "UNKNOWN": + return meta(version, preformatted=True, config=config) + + +def parse_pip_egg_info(root, config=None): + pipdir = os.path.join(root, "pip-egg-info") + if not os.path.isdir(pipdir): + return + items = os.listdir(pipdir) + trace("pip-egg-info", pipdir, items) + if not items: + return + return parse_pkginfo(os.path.join(pipdir, items[0]), config=config) + + +def fallback_version(root, config=None): + if config.parentdir_prefix_version is not None: + _, parent_name = os.path.split(os.path.abspath(root)) + if parent_name.startswith(config.parentdir_prefix_version): + version = tag_to_version( + parent_name[len(config.parentdir_prefix_version) :], config + ) + if version is not None: + return meta(str(version), preformatted=True, config=config) + if config.fallback_version is not None: + return meta(config.fallback_version, preformatted=True, config=config) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py new file mode 100644 index 000000000..8166a9072 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py @@ -0,0 +1,169 @@ +import os +from pathlib import Path + +from .config import Configuration +from .scm_workdir import Workdir +from .utils import data_from_mime +from .utils import do_ex +from .utils import require_command +from .utils import trace +from .version import meta +from .version import tag_to_version + + +class HgWorkdir(Workdir): + + COMMAND = "hg" + + @classmethod + def from_potential_worktree(cls, wd): + require_command(cls.COMMAND) + root, err, ret = do_ex("hg root", wd) + if ret: + return + return cls(root) + + def get_meta(self, config): + + node, tags, bookmark, node_date = self.hg_log( + ".", "{node}\n{tag}\n{bookmark}\n{date|shortdate}" + ).split("\n") + + # TODO: support bookmarks and topics (but nowadays bookmarks are + # mainly used to emulate Git branches, which is already supported with + # the dedicated class GitWorkdirHgClient) + + branch, dirty, dirty_date = self.do( + ["hg", "id", "-T", "{branch}\n{if(dirty, 1, 0)}\n{date|shortdate}"] + ).split("\n") + dirty = bool(int(dirty)) + + if dirty: + date = dirty_date + else: + date = node_date + + if all(c == "0" for c in node): + trace("initial node", self.path) + return meta("0.0", config=config, dirty=dirty, branch=branch) + + node = "h" + node[:7] + + tags = tags.split() + if "tip" in tags: + # tip is not a real tag + tags = tags.remove("tip") + + if tags: + tag = tags[0] + tag = tag_to_version(tag) + if tag: + return meta(tag, 
dirty=dirty, branch=branch, config=config) + + try: + tag = self.get_latest_normalizable_tag() + dist = self.get_distance_revs(tag) + if tag == "null": + tag = "0.0" + dist = int(dist) + 1 + + if self.check_changes_since_tag(tag) or dirty: + return meta( + tag, + distance=dist, + node=node, + dirty=dirty, + branch=branch, + config=config, + node_date=date, + ) + else: + return meta(tag, config=config) + + except ValueError: + pass # unpacking failed, old hg + + def hg_log(self, revset, template): + cmd = ["hg", "log", "-r", revset, "-T", template] + return self.do(cmd) + + def get_latest_normalizable_tag(self): + # Gets all tags containing a '.' (see #229) from oldest to newest + outlines = self.hg_log( + revset="ancestors(.) and tag('re:\\.')", + template="{tags}{if(tags, '\n', '')}", + ).split() + if not outlines: + return "null" + tag = outlines[-1].split()[-1] + return tag + + def get_distance_revs(self, rev1, rev2="."): + revset = f"({rev1}::{rev2})" + out = self.hg_log(revset, ".") + return len(out) - 1 + + def check_changes_since_tag(self, tag): + + if tag == "0.0": + return True + + revset = ( + "(branch(.)" # look for revisions in this branch only + f" and tag({tag!r})::." # after the last tag + # ignore commits that only modify .hgtags and nothing else: + " and (merge() or file('re:^(?!\\.hgtags).*$'))" + f" and not tag({tag!r}))" # ignore the tagged commit itself + ) + + return bool(self.hg_log(revset, ".")) + + +def parse(root, config=None): + if not config: + config = Configuration(root=root) + + if os.path.exists(os.path.join(root, ".hg/git")): + paths, _, ret = do_ex("hg path", root) + if not ret: + for line in paths.split("\n"): + if line.startswith("default ="): + path = Path(line.split()[2]) + if path.name.endswith(".git") or (path / ".git").exists(): + from .git import _git_parse_inner + from .hg_git import GitWorkdirHgClient + + wd = GitWorkdirHgClient.from_potential_worktree(root) + if wd: + return _git_parse_inner(config, wd) + + wd = HgWorkdir.from_potential_worktree(config.absolute_root) + + if wd is None: + return + + return wd.get_meta(config) + + +def archival_to_version(data, config: "Configuration | None" = None): + trace("data", data) + node = data.get("node", "")[:12] + if node: + node = "h" + node + if "tag" in data: + return meta(data["tag"], config=config) + elif "latesttag" in data: + return meta( + data["latesttag"], + distance=data["latesttagdistance"], + node=node, + config=config, + ) + else: + return meta("0.0", node=node, config=config) + + +def parse_archival(root, config=None): + archival = os.path.join(root, ".hg_archival.txt") + data = data_from_mime(archival) + return archival_to_version(data, config=config) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py new file mode 100644 index 000000000..b871a3933 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py @@ -0,0 +1,133 @@ +import os +from datetime import datetime + +from .git import GitWorkdir +from .hg import HgWorkdir +from .utils import do_ex +from .utils import require_command +from .utils import trace + + +class GitWorkdirHgClient(GitWorkdir, HgWorkdir): + COMMAND = "hg" + + @classmethod + def from_potential_worktree(cls, wd): + require_command(cls.COMMAND) + root, err, ret = do_ex("hg root", wd) + if ret: + return + return cls(root) + + def is_dirty(self): + out, _, _ = self.do_ex("hg id -T '{dirty}'") + return bool(out) + + def get_branch(self): + branch, err, ret = 
self.do_ex("hg id -T {bookmarks}") + if ret: + trace("branch err", branch, err, ret) + return + return branch + + def get_head_date(self): + date_part, err, ret = self.do_ex("hg log -r . -T {shortdate(date)}") + if ret: + trace("head date err", date_part, err, ret) + return + return datetime.strptime(date_part, r"%Y-%m-%d").date() + + def is_shallow(self): + return False + + def fetch_shallow(self): + pass + + def get_hg_node(self): + node, _, ret = self.do_ex("hg log -r . -T {node}") + if not ret: + return node + + def _hg2git(self, hg_node): + git_node = None + with open(os.path.join(self.path, ".hg/git-mapfile")) as file: + for line in file: + if hg_node in line: + git_node, hg_node = line.split() + break + return git_node + + def node(self): + hg_node = self.get_hg_node() + if hg_node is None: + return + + git_node = self._hg2git(hg_node) + + if git_node is None: + # trying again after hg -> git + self.do_ex("hg gexport") + git_node = self._hg2git(hg_node) + + if git_node is None: + trace("Cannot get git node so we use hg node", hg_node) + + if hg_node == "0" * len(hg_node): + # mimick Git behavior + return None + + return hg_node + + return git_node[:7] + + def count_all_nodes(self): + revs, _, _ = self.do_ex("hg log -r 'ancestors(.)' -T '.'") + return len(revs) + + def default_describe(self): + """ + Tentative to reproduce the output of + + `git describe --dirty --tags --long --match *[0-9]*` + + """ + hg_tags, _, ret = self.do_ex( + [ + "hg", + "log", + "-r", + "(reverse(ancestors(.)) and tag(r're:[0-9]'))", + "-T", + "{tags}{if(tags, ' ', '')}", + ] + ) + if ret: + return None, None, None + hg_tags = hg_tags.split() + + if not hg_tags: + return None, None, None + + git_tags = {} + with open(os.path.join(self.path, ".hg/git-tags")) as file: + for line in file: + node, tag = line.split() + git_tags[tag] = node + + # find the first hg tag which is also a git tag + for tag in hg_tags: + if tag in git_tags: + break + + out, _, ret = self.do_ex(["hg", "log", "-r", f"'{tag}'::.", "-T", "."]) + if ret: + return None, None, None + distance = len(out) - 1 + + node = self.node() + desc = f"{tag}-{distance}-g{node}" + + if self.is_dirty(): + desc += "-dirty" + + return desc, None, 0 diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py new file mode 100644 index 000000000..ad69a3ffa --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py @@ -0,0 +1,94 @@ +import os +import warnings + +import setuptools + +from . import _get_version +from .config import _read_dist_name_from_setup_cfg +from .config import Configuration +from .utils import do +from .utils import iter_entry_points +from .utils import trace + + +def _warn_on_old_setuptools(_version=setuptools.__version__): + if int(_version.split(".")[0]) < 45: + warnings.warn( + RuntimeWarning( + f""" +ERROR: setuptools=={_version} is used in combination with setuptools_scm>=6.x + +Your build configuration is incomplete and previously worked by accident! + + +This happens as setuptools is unable to replace itself when a activated build dependency +requires a more recent setuptools version +(it does not respect "setuptools>X" in setup_requires). 
+ + +setuptools>=31 is required for setup.cfg metadata support +setuptools>=42 is required for pyproject.toml configuration support + +Suggested workarounds if applicable: + - preinstalling build dependencies like setuptools_scm before running setup.py + - installing setuptools_scm using the system package manager to ensure consistency + - migrating from the deprecated setup_requires mechanism to pep517/518 + and using a pyproject.toml to declare build dependencies + which are reliably pre-installed before running the build tools +""" + ) + ) + + +_warn_on_old_setuptools() + + +def version_keyword(dist: setuptools.Distribution, keyword, value): + if not value: + return + if value is True: + value = {} + if getattr(value, "__call__", None): + value = value() + assert ( + "dist_name" not in value + ), "dist_name may not be specified in the setup keyword " + + trace( + "version keyword", + vars(dist.metadata), + ) + dist_name = dist.metadata.name # type: str | None + if dist_name is None: + dist_name = _read_dist_name_from_setup_cfg() + config = Configuration(dist_name=dist_name, **value) + dist.metadata.version = _get_version(config) + + +def find_files(path=""): + for ep in iter_entry_points("setuptools_scm.files_command"): + command = ep.load() + if isinstance(command, str): + # this technique is deprecated + res = do(ep.load(), path or ".").splitlines() + else: + res = command(path) + if res: + return res + return [] + + +def infer_version(dist: setuptools.Distribution): + trace( + "finalize hook", + vars(dist.metadata), + ) + dist_name = dist.metadata.name + if not os.path.isfile("pyproject.toml"): + return + try: + config = Configuration.from_file(dist_name=dist_name) + except LookupError as e: + trace(e) + else: + dist.metadata.version = _get_version(config) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py new file mode 100644 index 000000000..142065f59 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py @@ -0,0 +1,15 @@ +from .utils import do +from .utils import do_ex +from .utils import require_command + + +class Workdir: + def __init__(self, path): + require_command(self.COMMAND) + self.path = path + + def do_ex(self, cmd): + return do_ex(cmd, cwd=self.path) + + def do(self, cmd): + return do(cmd, cwd=self.path) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py new file mode 100644 index 000000000..2e84f870a --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py @@ -0,0 +1,154 @@ +""" +utils +""" +import inspect +import os +import platform +import shlex +import subprocess +import sys +import warnings +from typing import Optional + +DEBUG = bool(os.environ.get("SETUPTOOLS_SCM_DEBUG")) +IS_WINDOWS = platform.system() == "Windows" + + +def no_git_env(env): + # adapted from pre-commit + # Too many bugs dealing with environment variables and GIT: + # https://github.com/pre-commit/pre-commit/issues/300 + # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running + # pre-commit hooks + # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE + # while running pre-commit hooks in submodules. + # GIT_DIR: Causes git clone to clone wrong thing + # GIT_INDEX_FILE: Causes 'error invalid object ...' 
during commit + for k, v in env.items(): + if k.startswith("GIT_"): + trace(k, v) + return { + k: v + for k, v in env.items() + if not k.startswith("GIT_") + or k in ("GIT_EXEC_PATH", "GIT_SSH", "GIT_SSH_COMMAND") + } + + +def trace(*k) -> None: + if DEBUG: + print(*k, file=sys.stderr, flush=True) + + +def ensure_stripped_str(str_or_bytes): + if isinstance(str_or_bytes, str): + return str_or_bytes.strip() + else: + return str_or_bytes.decode("utf-8", "surrogateescape").strip() + + +def _always_strings(env_dict): + """ + On Windows and Python 2, environment dictionaries must be strings + and not unicode. + """ + if IS_WINDOWS: + env_dict.update((key, str(value)) for (key, value) in env_dict.items()) + return env_dict + + +def _popen_pipes(cmd, cwd): + return subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=str(cwd), + env=_always_strings( + dict( + no_git_env(os.environ), + # os.environ, + # try to disable i18n + LC_ALL="C", + LANGUAGE="", + HGPLAIN="1", + ) + ), + ) + + +def do_ex(cmd, cwd="."): + trace("cmd", repr(cmd)) + trace(" in", cwd) + if os.name == "posix" and not isinstance(cmd, (list, tuple)): + cmd = shlex.split(cmd) + + p = _popen_pipes(cmd, cwd) + out, err = p.communicate() + if out: + trace("out", repr(out)) + if err: + trace("err", repr(err)) + if p.returncode: + trace("ret", p.returncode) + return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode + + +def do(cmd, cwd="."): + out, err, ret = do_ex(cmd, cwd) + if ret: + print(err) + return out + + +def data_from_mime(path): + with open(path, encoding="utf-8") as fp: + content = fp.read() + trace("content", repr(content)) + # the complex conditions come from reading pseudo-mime-messages + data = dict(x.split(": ", 1) for x in content.splitlines() if ": " in x) + trace("data", data) + return data + + +def function_has_arg(fn, argname): + assert inspect.isfunction(fn) + + argspec = inspect.signature(fn).parameters + + return argname in argspec + + +def has_command(name, warn=True): + try: + p = _popen_pipes([name, "help"], ".") + except OSError: + trace(*sys.exc_info()) + res = False + else: + p.communicate() + res = not p.returncode + if not res and warn: + warnings.warn("%r was not found" % name, category=RuntimeWarning) + return res + + +def require_command(name): + if not has_command(name, warn=False): + raise OSError("%r was not found" % name) + + +try: + from importlib.metadata import entry_points # type: ignore +except ImportError: + from pkg_resources import iter_entry_points +else: + + def iter_entry_points(group: str, name: Optional[str] = None): + all_eps = entry_points() + if hasattr(all_eps, "select"): + eps = all_eps.select(group=group) + else: + eps = all_eps[group] + if name is None: + return iter(eps) + return (ep for ep in eps if ep.name == name) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py new file mode 100644 index 000000000..91e25f6a3 --- /dev/null +++ b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py @@ -0,0 +1,460 @@ +import datetime +import os +import re +import time +import warnings + +from .config import Configuration +from .config import Version as PkgVersion +from .utils import iter_entry_points +from .utils import trace + + +SEMVER_MINOR = 2 +SEMVER_PATCH = 3 +SEMVER_LEN = 3 + + +def _parse_version_tag(tag, config): + tagstring = tag if isinstance(tag, str) else str(tag) + match = config.tag_regex.match(tagstring) + + result = None + if match: 
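+ # a single unnamed group is the version; otherwise use the group named 'version'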
+ if len(match.groups()) == 1: + key = 1 + else: + key = "version" + + result = { + "version": match.group(key), + "prefix": match.group(0)[: match.start(key)], + "suffix": match.group(0)[match.end(key) :], + } + + trace(f"tag '{tag}' parsed to {result}") + return result + + +def callable_or_entrypoint(group, callable_or_name): + trace("ep", (group, callable_or_name)) + + if callable(callable_or_name): + return callable_or_name + + for ep in iter_entry_points(group, callable_or_name): + trace("ep found:", ep.name) + return ep.load() + + +def tag_to_version(tag, config: "Configuration | None" = None): + """ + take a tag that might be prefixed with a keyword and return only the version part + :param config: optional configuration object + """ + trace("tag", tag) + + if not config: + config = Configuration() + + tagdict = _parse_version_tag(tag, config) + if not isinstance(tagdict, dict) or not tagdict.get("version", None): + warnings.warn(f"tag {tag!r} no version found") + return None + + version = tagdict["version"] + trace("version pre parse", version) + + if tagdict.get("suffix", ""): + warnings.warn( + "tag {!r} will be stripped of its suffix '{}'".format( + tag, tagdict["suffix"] + ) + ) + + version = config.version_cls(version) + trace("version", repr(version)) + + return version + + +def tags_to_versions(tags, config=None): + """ + take tags that might be prefixed with a keyword and return only the version part + :param tags: an iterable of tags + :param config: optional configuration object + """ + result = [] + for tag in tags: + tag = tag_to_version(tag, config=config) + if tag: + result.append(tag) + return result + + +class ScmVersion: + def __init__( + self, + tag_version, + distance=None, + node=None, + dirty=False, + preformatted=False, + branch=None, + config=None, + node_date=None, + **kw, + ): + if kw: + trace("unknown args", kw) + self.tag = tag_version + if dirty and distance is None: + distance = 0 + self.distance = distance + self.node = node + self.node_date = node_date + self.time = datetime.datetime.utcfromtimestamp( + int(os.environ.get("SOURCE_DATE_EPOCH", time.time())) + ) + self._extra = kw + self.dirty = dirty + self.preformatted = preformatted + self.branch = branch + self.config = config + + @property + def extra(self): + warnings.warn( + "ScmVersion.extra is deprecated and will be removed in future", + category=DeprecationWarning, + stacklevel=2, + ) + return self._extra + + @property + def exact(self): + return self.distance is None + + def __repr__(self): + return self.format_with( + "<ScmVersion {tag} dist={distance} node={node} dirty={dirty} branch={branch}>" + ) + + def format_with(self, fmt, **kw): + return fmt.format( + time=self.time, + tag=self.tag, + distance=self.distance, + node=self.node, + dirty=self.dirty, + branch=self.branch, + node_date=self.node_date, + **kw, + ) + + def format_choice(self, clean_format, dirty_format, **kw): + return self.format_with(dirty_format if self.dirty else clean_format, **kw) + + def format_next_version(self, guess_next, fmt="{guessed}.dev{distance}", **kw): + guessed = guess_next(self.tag, **kw) + return self.format_with(fmt, guessed=guessed) + + +def _parse_tag(tag, preformatted, config: "Configuration|None"): + if preformatted: + return tag + if config is None or not isinstance(tag, config.version_cls): + tag = tag_to_version(tag, config) + return tag + + +def meta( + tag, + distance: "int|None" = None, + dirty: bool = False, + node: "str|None" = None, + preformatted: bool = False, + branch: "str|None" = None, + config: "Configuration|None" = None, + **kw, +): + if not config:
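+ # no explicit Configuration given: warn, then fall back to defaults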
warnings.warn( + "meta invoked without explicit configuration," + " will use defaults where required." + ) + parsed_version = _parse_tag(tag, preformatted, config) + trace("version", tag, "->", parsed_version) + assert parsed_version is not None, "Can't parse version %s" % tag + return ScmVersion( + parsed_version, distance, node, dirty, preformatted, branch, config, **kw + ) + + +def guess_next_version(tag_version: ScmVersion): + version = _strip_local(str(tag_version)) + return _bump_dev(version) or _bump_regex(version) + + +def _strip_local(version_string): + public, sep, local = version_string.partition("+") + return public + + +def _bump_dev(version): + if ".dev" not in version: + return + + prefix, tail = version.rsplit(".dev", 1) + if tail != "0": + raise ValueError( + "choosing custom numbers for the `.devX` distance " + "is not supported.\n " + "The {version} can't be bumped\n" + "Please drop the tag or create a new supported one".format(version=version) + ) + return prefix + + +def _bump_regex(version): + match = re.match(r"(.*?)(\d+)$", version) + if match is None: + raise ValueError( + "{version} does not end with a number to bump, " + "please correct or use a custom version scheme".format(version=version) + ) + else: + prefix, tail = match.groups() + return "%s%d" % (prefix, int(tail) + 1) + + +def guess_next_dev_version(version): + if version.exact: + return version.format_with("{tag}") + else: + return version.format_next_version(guess_next_version) + + +def guess_next_simple_semver(version, retain, increment=True): + try: + parts = [int(i) for i in str(version).split(".")[:retain]] + except ValueError: + raise ValueError(f"{version} can't be parsed as numeric version") + while len(parts) < retain: + parts.append(0) + if increment: + parts[-1] += 1 + while len(parts) < SEMVER_LEN: + parts.append(0) + return ".".join(str(i) for i in parts) + + +def simplified_semver_version(version): + if version.exact: + return guess_next_simple_semver(version.tag, retain=SEMVER_LEN, increment=False) + else: + if version.branch is not None and "feature" in version.branch: + return version.format_next_version( + guess_next_simple_semver, retain=SEMVER_MINOR + ) + else: + return version.format_next_version( + guess_next_simple_semver, retain=SEMVER_PATCH + ) + + +def release_branch_semver_version(version): + if version.exact: + return version.format_with("{tag}") + if version.branch is not None: + # Does the branch name (stripped of namespace) parse as a version? + branch_ver = _parse_version_tag(version.branch.split("/")[-1], version.config) + if branch_ver is not None: + branch_ver = branch_ver["version"] + if branch_ver[0] == "v": + # Allow branches that start with 'v', similar to Version. + branch_ver = branch_ver[1:] + # Does the branch version up to the minor part match the tag? If not it + # might be like, an issue number or something and not a version number, so + # we only want to use it if it matches. + tag_ver_up_to_minor = str(version.tag).split(".")[:SEMVER_MINOR] + branch_ver_up_to_minor = branch_ver.split(".")[:SEMVER_MINOR] + if branch_ver_up_to_minor == tag_ver_up_to_minor: + # We're in a release/maintenance branch, next is a patch/rc/beta bump: + return version.format_next_version(guess_next_version) + # We're in a development branch, next is a minor bump: + return version.format_next_version(guess_next_simple_semver, retain=SEMVER_MINOR) + + +def release_branch_semver(version): + warnings.warn( + "release_branch_semver is deprecated and will be removed in future. 
" + + "Use release_branch_semver_version instead", + category=DeprecationWarning, + stacklevel=2, + ) + return release_branch_semver_version(version) + + +def no_guess_dev_version(version): + if version.exact: + return version.format_with("{tag}") + else: + return version.format_with("{tag}.post1.dev{distance}") + + +def date_ver_match(ver): + match = re.match( + ( + r"^(?P(?P\d{2}|\d{4})(?:\.\d{1,2}){2})" + r"(?:\.(?P\d*)){0,1}?$" + ), + str(ver), + ) + return match + + +def guess_next_date_ver(version, node_date=None, date_fmt=None, version_cls=None): + """ + same-day -> patch +1 + other-day -> today + + distance is always added as .devX + """ + match = date_ver_match(version) + if match is None: + warnings.warn( + f"{version} does not correspond to a valid versioning date, " + "assuming legacy version" + ) + if date_fmt is None: + date_fmt = "%y.%m.%d" + + # deduct date format if not provided + if date_fmt is None: + date_fmt = "%Y.%m.%d" if len(match.group("year")) == 4 else "%y.%m.%d" + head_date = node_date or datetime.date.today() + # compute patch + if match is None: + tag_date = datetime.date.today() + else: + tag_date = datetime.datetime.strptime(match.group("date"), date_fmt).date() + if tag_date == head_date: + patch = "0" if match is None else (match.group("patch") or "0") + patch = int(patch) + 1 + else: + if tag_date > head_date and match is not None: + # warn on future times + warnings.warn( + "your previous tag ({}) is ahead your node date ({})".format( + tag_date, head_date + ) + ) + patch = 0 + next_version = "{node_date:{date_fmt}}.{patch}".format( + node_date=head_date, date_fmt=date_fmt, patch=patch + ) + # rely on the Version object to ensure consistency (e.g. remove leading 0s) + if version_cls is None: + version_cls = PkgVersion + next_version = str(version_cls(next_version)) + return next_version + + +def calver_by_date(version): + if version.exact and not version.dirty: + return version.format_with("{tag}") + # TODO: move the release-X check to a new scheme + if version.branch is not None and version.branch.startswith("release-"): + branch_ver = _parse_version_tag(version.branch.split("-")[-1], version.config) + if branch_ver is not None: + ver = branch_ver["version"] + match = date_ver_match(ver) + if match: + return ver + return version.format_next_version( + guess_next_date_ver, + node_date=version.node_date, + version_cls=version.config.version_cls, + ) + + +def _format_local_with_time(version, time_format): + + if version.exact or version.node is None: + return version.format_choice( + "", "+d{time:{time_format}}", time_format=time_format + ) + else: + return version.format_choice( + "+{node}", "+{node}.d{time:{time_format}}", time_format=time_format + ) + + +def get_local_node_and_date(version): + return _format_local_with_time(version, time_format="%Y%m%d") + + +def get_local_node_and_timestamp(version, fmt="%Y%m%d%H%M%S"): + return _format_local_with_time(version, time_format=fmt) + + +def get_local_dirty_tag(version): + return version.format_choice("", "+dirty") + + +def get_no_local_node(_): + return "" + + +def postrelease_version(version): + if version.exact: + return version.format_with("{tag}") + else: + return version.format_with("{tag}.post{distance}") + + +def _get_ep(group, name): + for ep in iter_entry_points(group, name): + trace("ep found:", ep.name) + return ep.load() + + +def _iter_version_schemes(entrypoint, scheme_value, _memo=None): + if _memo is None: + _memo = set() + if isinstance(scheme_value, str): + scheme_value = 
_get_ep(entrypoint, scheme_value) + + if isinstance(scheme_value, (list, tuple)): + for variant in scheme_value: + if variant not in _memo: + _memo.add(variant) + yield from _iter_version_schemes(entrypoint, variant, _memo=_memo) + elif callable(scheme_value): + yield scheme_value + + +def _call_version_scheme(version, entypoint, given_value, default): + for scheme in _iter_version_schemes(entypoint, given_value): + result = scheme(version) + if result is not None: + return result + return default + + +def format_version(version, **config): + trace("scm version", version) + trace("config", config) + if version.preformatted: + return version.tag + main_version = _call_version_scheme( + version, "setuptools_scm.version_scheme", config["version_scheme"], None + ) + trace("version", main_version) + assert main_version is not None + local_version = _call_version_scheme( + version, "setuptools_scm.local_scheme", config["local_scheme"], "+unknown" + ) + trace("local_version", local_version) + return main_version + local_version diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE new file mode 100644 index 000000000..89de35479 --- /dev/null +++ b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO new file mode 100644 index 000000000..78c9f981d --- /dev/null +++ b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO @@ -0,0 +1,44 @@ +Metadata-Version: 2.1 +Name: setuptools-scm-git-archive +Version: 1.1 +Summary: setuptools_scm plugin for git archives +Home-page: https://github.com/Changaco/setuptools_scm_git_archive/ +Author: Changaco +Author-email: changaco@changaco.oy.lc +License: MIT +Keywords: scm vcs version tags git archive +Platform: UNKNOWN + +This is a `setuptools_scm `_ plugin +that adds support for git archives (for example the ones GitHub automatically +generates). + +Note that it only works for archives of tagged commits (because git currently +lacks a format option equivalent to ``git describe --tags``). + +Usage +----- + +Add ``'setuptools_scm_git_archive'`` to the ``setup_requires`` parameter in your +project's ``setup.py`` file: + +.. 
code:: python + + setup( + ..., + use_scm_version=True, + setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'], + ..., + ) + +Create a ``.git_archival.txt`` file with the following content:: + + ref-names: $Format:%D$ + +Then add this line to the ``.gitattributes`` file:: + + .git_archival.txt export-subst + +Finally, don't forget to commit these two files. + + diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD new file mode 100644 index 000000000..fc3cf20eb --- /dev/null +++ b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD @@ -0,0 +1,7 @@ +setuptools_scm_git_archive/__init__.py,sha256=Ds2ZcVtE-4R1sw5Y0Pps3xrRTvl_La4VTjy25hOje5Y,518 +setuptools_scm_git_archive-1.1.dist-info/LICENSE,sha256=iYB6zyMJvShfAzQE7nhYFgLzzZuBmhasLw5fYP9KRz4,1023 +setuptools_scm_git_archive-1.1.dist-info/METADATA,sha256=oM3mrg7VgUdhFGJiSv1AyiOVEh_MOzNlX8Atg8lji10,1150 +setuptools_scm_git_archive-1.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110 +setuptools_scm_git_archive-1.1.dist-info/entry_points.txt,sha256=pnu7pquTqm1K1FYh6SbnBw8m3VGKrST-SORk5h2Vqqw,171 +setuptools_scm_git_archive-1.1.dist-info/top_level.txt,sha256=7hn9ByUzlXSHy5nZTcHi8oq763_bGWG4Zg-902SA01A,27 +setuptools_scm_git_archive-1.1.dist-info/RECORD,, diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL new file mode 100644 index 000000000..c8240f03e --- /dev/null +++ b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt new file mode 100644 index 000000000..5a714ca9d --- /dev/null +++ b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt @@ -0,0 +1,6 @@ +[setuptools_scm.parse_scm] +.git_archival.txt = setuptools_scm_git_archive:parse + +[setuptools_scm.parse_scm_fallback] +.git_archival.txt = setuptools_scm_git_archive:parse + diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt new file mode 100644 index 000000000..066fe456c --- /dev/null +++ b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt @@ -0,0 +1 @@ +setuptools_scm_git_archive diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py new file mode 100644 index 000000000..1e8275817 --- /dev/null +++ b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py @@ -0,0 +1,21 @@ +from os.path import join +import re + +from setuptools_scm.utils import data_from_mime, trace +from setuptools_scm.version import meta, tags_to_versions + + +tag_re = re.compile(r'(?<=\btag: )([^,]+)\b') + + +def archival_to_version(data): + trace('data', data) + versions = tags_to_versions(tag_re.findall(data.get('ref-names', ''))) + if versions: + return meta(versions[0]) + + +def parse(root): + archival = join(root, '.git_archival.txt') + data = data_from_mime(archival) + return archival_to_version(data) diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE 
b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE new file mode 100644 index 000000000..e859590f8 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Taneli Hukkinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO new file mode 100644 index 000000000..ad224bfb4 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO @@ -0,0 +1,208 @@ +Metadata-Version: 2.1 +Name: tomli +Version: 1.2.2 +Summary: A lil' TOML parser +Keywords: toml +Author-email: Taneli Hukkinen +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Typing :: Typed +Project-URL: Changelog, https://github.com/hukkin/tomli/blob/master/CHANGELOG.md +Project-URL: Homepage, https://github.com/hukkin/tomli + +[![Build Status](https://github.com/hukkin/tomli/workflows/Tests/badge.svg?branch=master)](https://github.com/hukkin/tomli/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush) +[![codecov.io](https://codecov.io/gh/hukkin/tomli/branch/master/graph/badge.svg)](https://codecov.io/gh/hukkin/tomli) +[![PyPI version](https://img.shields.io/pypi/v/tomli)](https://pypi.org/project/tomli) + +# Tomli + +> A lil' TOML parser + +**Table of Contents** *generated with [mdformat-toc](https://github.com/hukkin/mdformat-toc)* + + + +- [Intro](#intro) +- [Installation](#installation) +- [Usage](#usage) + - [Parse a TOML string](#parse-a-toml-string) + - [Parse a TOML file](#parse-a-toml-file) + - [Handle invalid TOML](#handle-invalid-toml) + - [Construct `decimal.Decimal`s from TOML floats](#construct-decimaldecimals-from-toml-floats) +- [FAQ](#faq) + - [Why this parser?](#why-this-parser) + - [Is comment preserving round-trip parsing 
supported?](#is-comment-preserving-round-trip-parsing-supported) + - [Is there a `dumps`, `write` or `encode` function?](#is-there-a-dumps-write-or-encode-function) + - [How do TOML types map into Python types?](#how-do-toml-types-map-into-python-types) +- [Performance](#performance) + + + +## Intro + +Tomli is a Python library for parsing [TOML](https://toml.io). +Tomli is fully compatible with [TOML v1.0.0](https://toml.io/en/v1.0.0). + +## Installation + +```bash +pip install tomli +``` + +## Usage + +### Parse a TOML string + +```python +import tomli + +toml_str = """ + gretzky = 99 + + [kurri] + jari = 17 + """ + +toml_dict = tomli.loads(toml_str) +assert toml_dict == {"gretzky": 99, "kurri": {"jari": 17}} +``` + +### Parse a TOML file + +```python +import tomli + +with open("path_to_file/conf.toml", "rb") as f: + toml_dict = tomli.load(f) +``` + +The file must be opened in binary mode (with the `"rb"` flag). +Binary mode will enforce decoding the file as UTF-8 with universal newlines disabled, +both of which are required to correctly parse TOML. +Support for text file objects is deprecated for removal in the next major release. + +### Handle invalid TOML + +```python +import tomli + +try: + toml_dict = tomli.loads("]] this is invalid TOML [[") +except tomli.TOMLDecodeError: + print("Yep, definitely not valid.") +``` + +Note that while the `TOMLDecodeError` type is public API, error messages of raised instances of it are not. +Error messages should not be assumed to stay constant across Tomli versions. + +### Construct `decimal.Decimal`s from TOML floats + +```python +from decimal import Decimal +import tomli + +toml_dict = tomli.loads("precision-matters = 0.982492", parse_float=Decimal) +assert toml_dict["precision-matters"] == Decimal("0.982492") +``` + +Note that `decimal.Decimal` can be replaced with another callable that converts a TOML float from string to a Python type. +The `decimal.Decimal` is, however, a practical choice for use cases where float inaccuracies can not be tolerated. + +Illegal types include `dict`, `list`, and anything that has the `append` attribute. +Parsing floats into an illegal type results in undefined behavior. + +## FAQ + +### Why this parser? + +- it's lil' +- pure Python with zero dependencies +- the fastest pure Python parser [\*](#performance): + 15x as fast as [tomlkit](https://pypi.org/project/tomlkit/), + 2.4x as fast as [toml](https://pypi.org/project/toml/) +- outputs [basic data types](#how-do-toml-types-map-into-python-types) only +- 100% spec compliant: passes all tests in + [a test set](https://github.com/toml-lang/compliance/pull/8) + soon to be merged to the official + [compliance tests for TOML](https://github.com/toml-lang/compliance) + repository +- thoroughly tested: 100% branch coverage + +### Is comment preserving round-trip parsing supported? + +No. + +The `tomli.loads` function returns a plain `dict` that is populated with builtin types and types from the standard library only. +Preserving comments requires a custom type to be returned so will not be supported, +at least not by the `tomli.loads` and `tomli.load` functions. + +Look into [TOML Kit](https://github.com/sdispater/tomlkit) if preservation of style is what you need. + +### Is there a `dumps`, `write` or `encode` function? + +[Tomli-W](https://github.com/hukkin/tomli-w) is the write-only counterpart of Tomli, providing `dump` and `dumps` functions. 
+ +The core library does not include write capability, as most TOML use cases are read-only, and Tomli intends to be minimal. + +### How do TOML types map into Python types? + +| TOML type | Python type | Details | +| ---------------- | ------------------- | ------------------------------------------------------------ | +| Document Root | `dict` | | +| Key | `str` | | +| String | `str` | | +| Integer | `int` | | +| Float | `float` | | +| Boolean | `bool` | | +| Offset Date-Time | `datetime.datetime` | `tzinfo` attribute set to an instance of `datetime.timezone` | +| Local Date-Time | `datetime.datetime` | `tzinfo` attribute set to `None` | +| Local Date | `datetime.date` | | +| Local Time | `datetime.time` | | +| Array | `list` | | +| Table | `dict` | | +| Inline Table | `dict` | | + +## Performance + +The `benchmark/` folder in this repository contains a performance benchmark for comparing the various Python TOML parsers. +The benchmark can be run with `tox -e benchmark-pypi`. +Running the benchmark on my personal computer output the following: + +```console +foo@bar:~/dev/tomli$ tox -e benchmark-pypi +benchmark-pypi installed: attrs==19.3.0,click==7.1.2,pytomlpp==1.0.2,qtoml==0.3.0,rtoml==0.7.0,toml==0.10.2,tomli==1.1.0,tomlkit==0.7.2 +benchmark-pypi run-test-pre: PYTHONHASHSEED='2658546909' +benchmark-pypi run-test: commands[0] | python -c 'import datetime; print(datetime.date.today())' +2021-07-23 +benchmark-pypi run-test: commands[1] | python --version +Python 3.8.10 +benchmark-pypi run-test: commands[2] | python benchmark/run.py +Parsing data.toml 5000 times: +------------------------------------------------------ + parser | exec time | performance (more is better) +-----------+------------+----------------------------- + rtoml | 0.901 s | baseline (100%) + pytomlpp | 1.08 s | 83.15% + tomli | 3.89 s | 23.15% + toml | 9.36 s | 9.63% + qtoml | 11.5 s | 7.82% + tomlkit | 56.8 s | 1.59% +``` + +The parsers are ordered from fastest to slowest, using the fastest parser as baseline. +Tomli performed the best out of all pure Python TOML parsers, +losing only to pytomlpp (wraps C++) and rtoml (wraps Rust). 
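For a rough, stdlib-only feel for what such a run measures, here is a tiny sketch. It is illustrative only, not the repository's benchmark: it times the README's toy document rather than the benchmark's data.toml, so the absolute numbers are not comparable.

```python
import timeit

import tomli

DOC = """
gretzky = 99

[kurri]
jari = 17
"""

# Time 5000 parses of a tiny document, mirroring the run count above.
n = 5000
secs = timeit.timeit(lambda: tomli.loads(DOC), number=n)
print(f"{n} parses took {secs:.3f} s ({secs / n * 1e6:.1f} us each)")
```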
+ diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD new file mode 100644 index 000000000..880ac4ed3 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD @@ -0,0 +1,9 @@ +tomli/__init__.py,sha256=kbhPFVUJrQxajcxAWEbYzDYEjjtRJ6dGT74U4XTOkhI,299 +tomli/_parser.py,sha256=HYJuOBq1QBZm0O6PMeLJPULdYVwsdYcdZUSuABujXTM,21659 +tomli/_re.py,sha256=bw4_EVo4n1qZwcEza7akJQ_wM6hLDJFn1Zsuf9YSjs8,2817 +tomli/_types.py,sha256=b1mavYLUYLBz0EP2lDrMVM6EGVFeqvxiqkS03jXNBvs,126 +tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +tomli-1.2.2.dist-info/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +tomli-1.2.2.dist-info/WHEEL,sha256=pVNS5wRGlMB8qzi0M1coslDk7i694hS7VxZqRXRntY4,81 +tomli-1.2.2.dist-info/METADATA,sha256=bhJIzo0PW08BpJ2wMFAGN19RxM8pU1eO5FMtjhAojRc,9089 +tomli-1.2.2.dist-info/RECORD,, diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL new file mode 100644 index 000000000..3c6a1028c --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.4.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py new file mode 100644 index 000000000..7bcdbab36 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py @@ -0,0 +1,9 @@ +"""A lil' TOML parser.""" + +__all__ = ("loads", "load", "TOMLDecodeError") +__version__ = "1.2.2" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT + +from tomli._parser import TOMLDecodeError, load, loads + +# Pretend this exception was created here. +TOMLDecodeError.__module__ = "tomli" diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py new file mode 100644 index 000000000..89e81c3b3 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py @@ -0,0 +1,663 @@ +import string +from types import MappingProxyType +from typing import Any, BinaryIO, Dict, FrozenSet, Iterable, NamedTuple, Optional, Tuple +import warnings + +from tomli._re import ( + RE_DATETIME, + RE_LOCALTIME, + RE_NUMBER, + match_to_datetime, + match_to_localtime, + match_to_number, +) +from tomli._types import Key, ParseFloat, Pos + +ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) + +# Neither of these sets include quotation mark or backslash. They are +# currently handled as separate cases in the parser functions. 
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") +ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") + +ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS +ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS + +ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS + +TOML_WS = frozenset(" \t") +TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") +BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") +KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") +HEXDIGIT_CHARS = frozenset(string.hexdigits) + +BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( + { + "\\b": "\u0008", # backspace + "\\t": "\u0009", # tab + "\\n": "\u000A", # linefeed + "\\f": "\u000C", # form feed + "\\r": "\u000D", # carriage return + '\\"': "\u0022", # quote + "\\\\": "\u005C", # backslash + } +) + + +class TOMLDecodeError(ValueError): + """An error raised if a document is not valid TOML.""" + + +def load(fp: BinaryIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]: + """Parse TOML from a binary file object.""" + s_bytes = fp.read() + try: + s = s_bytes.decode() + except AttributeError: + warnings.warn( + "Text file object support is deprecated in favor of binary file objects." + ' Use `open("foo.toml", "rb")` to open the file in binary mode.', + DeprecationWarning, + stacklevel=2, + ) + s = s_bytes # type: ignore[assignment] + return loads(s, parse_float=parse_float) + + +def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa: C901 + """Parse TOML from a string.""" + + # The spec allows converting "\r\n" to "\n", even in string + # literals. Let's do so to simplify parsing. + src = s.replace("\r\n", "\n") + pos = 0 + out = Output(NestedDict(), Flags()) + header: Key = () + + # Parse one statement at a time + # (typically means one line in TOML source) + while True: + # 1. Skip line leading whitespace + pos = skip_chars(src, pos, TOML_WS) + + # 2. Parse rules. Expect one of the following: + # - end of file + # - end of line + # - comment + # - key/value pair + # - append dict to list (and move to its namespace) + # - create dict (and move to its namespace) + # Skip trailing whitespace when applicable. + try: + char = src[pos] + except IndexError: + break + if char == "\n": + pos += 1 + continue + if char in KEY_INITIAL_CHARS: + pos = key_value_rule(src, pos, out, header, parse_float) + pos = skip_chars(src, pos, TOML_WS) + elif char == "[": + try: + second_char: Optional[str] = src[pos + 1] + except IndexError: + second_char = None + if second_char == "[": + pos, header = create_list_rule(src, pos, out) + else: + pos, header = create_dict_rule(src, pos, out) + pos = skip_chars(src, pos, TOML_WS) + elif char != "#": + raise suffixed_err(src, pos, "Invalid statement") + + # 3. Skip comment + pos = skip_comment(src, pos) + + # 4. Expect end of line or end of file + try: + char = src[pos] + except IndexError: + break + if char != "\n": + raise suffixed_err( + src, pos, "Expected newline or end of document after a statement" + ) + pos += 1 + + return out.data.dict + + +class Flags: + """Flags that map to parsed keys/namespaces.""" + + # Marks an immutable namespace (inline array or inline table). + FROZEN = 0 + # Marks a nest that has been explicitly created and can no longer + # be opened using the "[table]" syntax. 
+ EXPLICIT_NEST = 1 + + def __init__(self) -> None: + self._flags: Dict[str, dict] = {} + + def unset_all(self, key: Key) -> None: + cont = self._flags + for k in key[:-1]: + if k not in cont: + return + cont = cont[k]["nested"] + cont.pop(key[-1], None) + + def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None: + cont = self._flags + for k in head_key: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + for k in rel_key: + if k in cont: + cont[k]["flags"].add(flag) + else: + cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + + def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 + cont = self._flags + key_parent, key_stem = key[:-1], key[-1] + for k in key_parent: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + if key_stem not in cont: + cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) + + def is_(self, key: Key, flag: int) -> bool: + if not key: + return False # document root has no flags + cont = self._flags + for k in key[:-1]: + if k not in cont: + return False + inner_cont = cont[k] + if flag in inner_cont["recursive_flags"]: + return True + cont = inner_cont["nested"] + key_stem = key[-1] + if key_stem in cont: + cont = cont[key_stem] + return flag in cont["flags"] or flag in cont["recursive_flags"] + return False + + +class NestedDict: + def __init__(self) -> None: + # The parsed content of the TOML document + self.dict: Dict[str, Any] = {} + + def get_or_create_nest( + self, + key: Key, + *, + access_lists: bool = True, + ) -> dict: + cont: Any = self.dict + for k in key: + if k not in cont: + cont[k] = {} + cont = cont[k] + if access_lists and isinstance(cont, list): + cont = cont[-1] + if not isinstance(cont, dict): + raise KeyError("There is no nest behind this key") + return cont + + def append_nest_to_list(self, key: Key) -> None: + cont = self.get_or_create_nest(key[:-1]) + last_key = key[-1] + if last_key in cont: + list_ = cont[last_key] + try: + list_.append({}) + except AttributeError: + raise KeyError("An object other than list found behind this key") + else: + cont[last_key] = [{}] + + +class Output(NamedTuple): + data: NestedDict + flags: Flags + + +def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: + try: + while src[pos] in chars: + pos += 1 + except IndexError: + pass + return pos + + +def skip_until( + src: str, + pos: Pos, + expect: str, + *, + error_on: FrozenSet[str], + error_on_eof: bool, +) -> Pos: + try: + new_pos = src.index(expect, pos) + except ValueError: + new_pos = len(src) + if error_on_eof: + raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None + + if not error_on.isdisjoint(src[pos:new_pos]): + while src[pos] not in error_on: + pos += 1 + raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") + return new_pos + + +def skip_comment(src: str, pos: Pos) -> Pos: + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char == "#": + return skip_until( + src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False + ) + return pos + + +def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: + while True: + pos_before_skip = pos + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + pos = skip_comment(src, pos) + if pos == pos_before_skip: + return pos + + 
+def create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]: + pos += 1 # Skip "[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not declare {key} twice") + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.get_or_create_nest(key) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") from None + + if not src.startswith("]", pos): + raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration') + return pos + 1, key + + +def create_list_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]: + pos += 2 # Skip "[[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") + # Free the namespace now that it points to another empty list item... + out.flags.unset_all(key) + # ...but this key precisely is still prohibited from table declaration + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.append_nest_to_list(key) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") from None + + if not src.startswith("]]", pos): + raise suffixed_err(src, pos, 'Expected "]]" at the end of an array declaration') + return pos + 2, key + + +def key_value_rule( + src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat +) -> Pos: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + abs_key_parent = header + key_parent + + if out.flags.is_(abs_key_parent, Flags.FROZEN): + raise suffixed_err( + src, pos, f"Can not mutate immutable namespace {abs_key_parent}" + ) + # Containers in the relative path can't be opened with the table syntax after this + out.flags.set_for_relative_key(header, key, Flags.EXPLICIT_NEST) + try: + nest = out.data.get_or_create_nest(abs_key_parent) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") from None + if key_stem in nest: + raise suffixed_err(src, pos, "Can not overwrite a value") + # Mark inline table and array namespaces recursively immutable + if isinstance(value, (dict, list)): + out.flags.set(header + key, Flags.FROZEN, recursive=True) + nest[key_stem] = value + return pos + + +def parse_key_value_pair( + src: str, pos: Pos, parse_float: ParseFloat +) -> Tuple[Pos, Key, Any]: + pos, key = parse_key(src, pos) + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char != "=": + raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair') + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, value = parse_value(src, pos, parse_float) + return pos, key, value + + +def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]: + pos, key_part = parse_key_part(src, pos) + key: Key = (key_part,) + pos = skip_chars(src, pos, TOML_WS) + while True: + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char != ".": + return pos, key + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, key_part = parse_key_part(src, pos) + key += (key_part,) + pos = skip_chars(src, pos, TOML_WS) + + +def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]: + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + if char in BARE_KEY_CHARS: + start_pos = pos + pos = skip_chars(src, pos, BARE_KEY_CHARS) + return pos, 
src[start_pos:pos] + if char == "'": + return parse_literal_str(src, pos) + if char == '"': + return parse_one_line_basic_str(src, pos) + raise suffixed_err(src, pos, "Invalid initial character for a key part") + + +def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]: + pos += 1 + return parse_basic_str(src, pos, multiline=False) + + +def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]: + pos += 1 + array: list = [] + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + while True: + pos, val = parse_value(src, pos, parse_float) + array.append(val) + pos = skip_comments_and_array_ws(src, pos) + + c = src[pos : pos + 1] + if c == "]": + return pos + 1, array + if c != ",": + raise suffixed_err(src, pos, "Unclosed array") + pos += 1 + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + + +def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]: + pos += 1 + nested_dict = NestedDict() + flags = Flags() + + pos = skip_chars(src, pos, TOML_WS) + if src.startswith("}", pos): + return pos + 1, nested_dict.dict + while True: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + if flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") + try: + nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) + except KeyError: + raise suffixed_err(src, pos, "Can not overwrite a value") from None + if key_stem in nest: + raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") + nest[key_stem] = value + pos = skip_chars(src, pos, TOML_WS) + c = src[pos : pos + 1] + if c == "}": + return pos + 1, nested_dict.dict + if c != ",": + raise suffixed_err(src, pos, "Unclosed inline table") + if isinstance(value, (dict, list)): + flags.set(key, Flags.FROZEN, recursive=True) + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + + +def parse_basic_str_escape( # noqa: C901 + src: str, pos: Pos, *, multiline: bool = False +) -> Tuple[Pos, str]: + escape_id = src[pos : pos + 2] + pos += 2 + if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: + # Skip whitespace until next non-whitespace character or end of + # the doc. Error if non-whitespace is found before newline. 
+ if escape_id != "\\\n": + pos = skip_chars(src, pos, TOML_WS) + try: + char = src[pos] + except IndexError: + return pos, "" + if char != "\n": + raise suffixed_err(src, pos, 'Unescaped "\\" in a string') + pos += 1 + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + return pos, "" + if escape_id == "\\u": + return parse_hex_char(src, pos, 4) + if escape_id == "\\U": + return parse_hex_char(src, pos, 8) + try: + return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] + except KeyError: + if len(escape_id) != 2: + raise suffixed_err(src, pos, "Unterminated string") from None + raise suffixed_err(src, pos, 'Unescaped "\\" in a string') from None + + +def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]: + return parse_basic_str_escape(src, pos, multiline=True) + + +def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]: + hex_str = src[pos : pos + hex_len] + if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): + raise suffixed_err(src, pos, "Invalid hex value") + pos += hex_len + hex_int = int(hex_str, 16) + if not is_unicode_scalar_value(hex_int): + raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") + return pos, chr(hex_int) + + +def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]: + pos += 1 # Skip starting apostrophe + start_pos = pos + pos = skip_until( + src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True + ) + return pos + 1, src[start_pos:pos] # Skip ending apostrophe + + +def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]: + pos += 3 + if src.startswith("\n", pos): + pos += 1 + + if literal: + delim = "'" + end_pos = skip_until( + src, + pos, + "'''", + error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, + error_on_eof=True, + ) + result = src[pos:end_pos] + pos = end_pos + 3 + else: + delim = '"' + pos, result = parse_basic_str(src, pos, multiline=True) + + # Add at maximum two extra apostrophes/quotes if the end sequence + # is 4 or 5 chars long instead of just 3. 
+ if not src.startswith(delim, pos): + return pos, result + pos += 1 + if not src.startswith(delim, pos): + return pos, result + delim + pos += 1 + return pos, result + (delim * 2) + + +def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]: + if multiline: + error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape_multiline + else: + error_on = ILLEGAL_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape + result = "" + start_pos = pos + while True: + try: + char = src[pos] + except IndexError: + raise suffixed_err(src, pos, "Unterminated string") from None + if char == '"': + if not multiline: + return pos + 1, result + src[start_pos:pos] + if src.startswith('"""', pos): + return pos + 3, result + src[start_pos:pos] + pos += 1 + continue + if char == "\\": + result += src[start_pos:pos] + pos, parsed_escape = parse_escapes(src, pos) + result += parsed_escape + start_pos = pos + continue + if char in error_on: + raise suffixed_err(src, pos, f"Illegal character {char!r}") + pos += 1 + + +def parse_value( # noqa: C901 + src: str, pos: Pos, parse_float: ParseFloat +) -> Tuple[Pos, Any]: + try: + char: Optional[str] = src[pos] + except IndexError: + char = None + + # Basic strings + if char == '"': + if src.startswith('"""', pos): + return parse_multiline_str(src, pos, literal=False) + return parse_one_line_basic_str(src, pos) + + # Literal strings + if char == "'": + if src.startswith("'''", pos): + return parse_multiline_str(src, pos, literal=True) + return parse_literal_str(src, pos) + + # Booleans + if char == "t": + if src.startswith("true", pos): + return pos + 4, True + if char == "f": + if src.startswith("false", pos): + return pos + 5, False + + # Dates and times + datetime_match = RE_DATETIME.match(src, pos) + if datetime_match: + try: + datetime_obj = match_to_datetime(datetime_match) + except ValueError as e: + raise suffixed_err(src, pos, "Invalid date or datetime") from e + return datetime_match.end(), datetime_obj + localtime_match = RE_LOCALTIME.match(src, pos) + if localtime_match: + return localtime_match.end(), match_to_localtime(localtime_match) + + # Integers and "normal" floats. + # The regex will greedily match any type starting with a decimal + # char, so needs to be located after handling of dates and times. 
+ number_match = RE_NUMBER.match(src, pos) + if number_match: + return number_match.end(), match_to_number(number_match, parse_float) + + # Arrays + if char == "[": + return parse_array(src, pos, parse_float) + + # Inline tables + if char == "{": + return parse_inline_table(src, pos, parse_float) + + # Special floats + first_three = src[pos : pos + 3] + if first_three in {"inf", "nan"}: + return pos + 3, parse_float(first_three) + first_four = src[pos : pos + 4] + if first_four in {"-inf", "+inf", "-nan", "+nan"}: + return pos + 4, parse_float(first_four) + + raise suffixed_err(src, pos, "Invalid value") + + +def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError: + """Return a `TOMLDecodeError` where error message is suffixed with + coordinates in source.""" + + def coord_repr(src: str, pos: Pos) -> str: + if pos >= len(src): + return "end of document" + line = src.count("\n", 0, pos) + 1 + if line == 1: + column = pos + 1 + else: + column = pos - src.rindex("\n", 0, pos) + return f"line {line}, column {column}" + + return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})") + + +def is_unicode_scalar_value(codepoint: int) -> bool: + return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py new file mode 100644 index 000000000..912682974 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py @@ -0,0 +1,101 @@ +from datetime import date, datetime, time, timedelta, timezone, tzinfo +from functools import lru_cache +import re +from typing import Any, Optional, Union + +from tomli._types import ParseFloat + +# E.g. +# - 00:32:00.999999 +# - 00:32:00 +_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" + +RE_NUMBER = re.compile( + r""" +0 +(?: + x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex + | + b[01](?:_?[01])* # bin + | + o[0-7](?:_?[0-7])* # oct +) +| +[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part +(?P<floatpart> + (?:\.[0-9](?:_?[0-9])*)? # optional fractional part + (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part +) +""", + flags=re.VERBOSE, +) +RE_LOCALTIME = re.compile(_TIME_RE_STR) +RE_DATETIME = re.compile( + fr""" +([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 +(?: + [T ] + {_TIME_RE_STR} + (?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset +)? +""", + flags=re.VERBOSE, +) + + +def match_to_datetime(match: "re.Match") -> Union[datetime, date]: + """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. + + Raises ValueError if the match does not correspond to a valid date + or datetime. 
+ """ + ( + year_str, + month_str, + day_str, + hour_str, + minute_str, + sec_str, + micros_str, + zulu_time, + offset_sign_str, + offset_hour_str, + offset_minute_str, + ) = match.groups() + year, month, day = int(year_str), int(month_str), int(day_str) + if hour_str is None: + return date(year, month, day) + hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + if offset_sign_str: + tz: Optional[tzinfo] = cached_tz( + offset_hour_str, offset_minute_str, offset_sign_str + ) + elif zulu_time: + tz = timezone.utc + else: # local date-time + tz = None + return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) + + +@lru_cache(maxsize=None) +def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: + sign = 1 if sign_str == "+" else -1 + return timezone( + timedelta( + hours=sign * int(hour_str), + minutes=sign * int(minute_str), + ) + ) + + +def match_to_localtime(match: "re.Match") -> time: + hour_str, minute_str, sec_str, micros_str = match.groups() + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + return time(int(hour_str), int(minute_str), int(sec_str), micros) + + +def match_to_number(match: "re.Match", parse_float: "ParseFloat") -> Any: + if match.group("floatpart"): + return parse_float(match.group()) + return int(match.group(), 0) diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py new file mode 100644 index 000000000..e37cc8088 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py @@ -0,0 +1,6 @@ +from typing import Any, Callable, Tuple + +# Type annotations +ParseFloat = Callable[[str], Any] +Key = Tuple[str, ...] +Pos = int diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed b/.eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed new file mode 100644 index 000000000..7632ecf77 --- /dev/null +++ b/.eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561 diff --git a/stonesoup/plotter3.py b/stonesoup/plotter3.py new file mode 100644 index 000000000..7387060a7 --- /dev/null +++ b/stonesoup/plotter3.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Nov 18 11:33:27 2021 + +@author: Peter +""" +import warnings +from itertools import chain + +from mpl_toolkits import mplot3d # new as of plotter3 + +import numpy as np +from matplotlib import pyplot as plt +from matplotlib.lines import Line2D +from matplotlib.patches import Ellipse +from matplotlib.legend_handler import HandlerPatch + +from .types import detection +from .models.base import LinearModel, NonLinearModel + + +class Plotter: + """Plotting class for building graphs of Stone Soup simulations + + A plotting class which is used to simplify the process of plotting ground truths, + measurements, clutter and tracks. Tracks can be plotted with uncertainty ellipses or + particles if required. Legends are automatically generated with each plot. 
+ + Attributes + ---------- + fig: matplotlib.figure.Figure + Generated figure for graphs to be plotted on + ax: matplotlib.axes.Axes + Generated axes for graphs to be plotted on + handles_list: list of :class:`matplotlib.legend_handler.HandlerBase` + A list of generated legend handles + labels_list: list of str + A list of generated legend labels + """ + + def __init__(self): + # Generate plot axes + self.fig = plt.figure(figsize=(10, 6)) + self.ax = plt.axes(projection='3d') + self.ax.set_xlabel("$x$") + self.ax.set_ylabel("$y$") + self.ax.set_zlabel("$z$") + self.ax.axis('auto') + + # Create empty lists for legend handles and labels + self.handles_list = [] + self.labels_list = [] + + def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwargs): + """Plots ground truth(s) + + Plots each ground truth path passed in to :attr:`truths` and generates a legend + automatically. Ground truths are plotted as dashed lines with default colors. + + Users can change linestyle, color and marker using keyword arguments. Any changes + will apply to all ground truths. + + Parameters + ---------- + truths : set of :class:`~.GroundTruthPath` + Set of ground truths which will be plotted. If not a set, and instead a single + :class:`~.GroundTruthPath` type, the argument is modified to be a set to allow for + iteration. + mapping: list + List of 2 items specifying the mapping of the x and y components of the state space. + \\*\\*kwargs: dict + Additional arguments to be passed to plot function. Default is ``linestyle="--"``. + """ + + truths_kwargs = dict(linestyle="--") + truths_kwargs.update(kwargs) + if not isinstance(truths, set): + truths = {truths} # Make a set of length 1 + + for truth in truths: + self.ax.plot3D([state.state_vector[mapping[0]] for state in truth], + [state.state_vector[mapping[1]] for state in truth], + [state.state_vector[mapping[2]] for state in truth], + **truths_kwargs) + + # Generate legend items + truths_handle = Line2D([], [], linestyle=truths_kwargs['linestyle'], color='black') + self.handles_list.append(truths_handle) + self.labels_list.append(truths_label) + + # Generate legend + self.ax.legend(handles=self.handles_list, labels=self.labels_list) + + def plot_measurements(self, measurements, mapping, measurement_model=None, + measurements_label="Measurements", **kwargs): + """Plots measurements + + Plots detections and clutter, generating a legend automatically. Detections are plotted as + blue circles by default unless the detection type is clutter. + If the detection type is :class:`~.Clutter` it is plotted as a yellow 'tri-up' marker. + + Users can change the color and marker of detections using keyword arguments but not for + clutter detections. + + Parameters + ---------- + measurements : list of :class:`~.Detection` + Detections which will be plotted. If measurements is a set of lists it is flattened. + mapping: list + List of 2 items specifying the mapping of the x and y components of the state space. + measurement_model : :class:`~.Model`, optional + User-defined measurement model to be used in finding measurement state inverses if + they cannot be found from the measurements themselves. + \\*\\*kwargs: dict + Additional arguments to be passed to plot function for detections. Defaults are + ``marker='o'`` and ``color='b'``. 
+ """ + + measurement_kwargs = dict(marker='o', color='b') + measurement_kwargs.update(kwargs) + + if any(isinstance(item, set) for item in measurements): + measurements_set = chain.from_iterable(measurements) # Flatten into one set + else: + measurements_set = measurements + + plot_detections = [] + plot_clutter = [] + + for state in measurements_set: + meas_model = state.measurement_model # measurement_model from detections + if meas_model is None: + meas_model = measurement_model # measurement_model from input + + if isinstance(meas_model, LinearModel): + model_matrix = meas_model.matrix() + inv_model_matrix = np.linalg.pinv(model_matrix) + state_vec = inv_model_matrix @ state.state_vector + + elif isinstance(meas_model, NonLinearModel): + try: + state_vec = meas_model.inverse_function(state) + except (NotImplementedError, AttributeError): + warnings.warn('Nonlinear measurement model used with no inverse ' + 'function available') + continue + else: + warnings.warn('Measurement model type not specified for all detections') + continue + + if isinstance(state, detection.Clutter): + # Plot clutter + plot_clutter.append((*state_vec[mapping], )) + + elif isinstance(state, detection.Detection): + # Plot detections + plot_detections.append((*state_vec[mapping], )) + else: + warnings.warn(f'Unknown type {type(state)}') + continue + + if plot_detections: + detection_array = np.array(plot_detections) + self.ax.scatter(detection_array[:, 0], detection_array[:, 1], **measurement_kwargs) + measurements_handle = Line2D([], [], linestyle='', **measurement_kwargs) + + # Generate legend items for measurements + self.handles_list.append(measurements_handle) + self.labels_list.append(measurements_label) + + if plot_clutter: + clutter_array = np.array(plot_clutter) + self.ax.scatter(clutter_array[:, 0], clutter_array[:, 1], color='y', marker='2') + clutter_handle = Line2D([], [], linestyle='', marker='2', color='y') + clutter_label = "Clutter" + + # Generate legend items for clutter + self.handles_list.append(clutter_handle) + self.labels_list.append(clutter_label) + + # Generate legend + self.ax.legend(handles=self.handles_list, labels=self.labels_list) + + def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_label="Track", + **kwargs): + """Plots track(s) + + Plots each track generated, generating a legend automatically. If ``uncertainty=True``, + uncertainty ellipses are plotted. If ``particle=True``, particles are plotted. + Tracks are plotted as solid lines with point markers and default colors. + Uncertainty ellipses are plotted with a default color which is the same for all tracks. + + Users can change linestyle, color and marker using keyword arguments. Uncertainty ellipses + will also be plotted with the user defined colour and any changes will apply to all tracks. + + Parameters + ---------- + tracks : set of :class:`~.Track` + Set of tracks which will be plotted. If not a set, and instead a single + :class:`~.Track` type, the argument is modified to be a set to allow for iteration. + mapping: list + List of 2 items specifying the mapping of the x and y components of the state space. + uncertainty : bool + If True, function plots uncertainty ellipses. + particle : bool + If True, function plots particles. + track_label: str + Label to apply to all tracks for legend. + \\*\\*kwargs: dict + Additional arguments to be passed to plot function. Defaults are ``linestyle="-"``, + ``marker='.'`` and ``color=None``. 
+ """ + + tracks_kwargs = dict(linestyle='-', marker=".", color=None) + tracks_kwargs.update(kwargs) + if not isinstance(tracks, set): + tracks = {tracks} # Make a set of length 1 + + # Plot tracks + track_colors = {} + for track in tracks: + line = self.ax.plot([state.state_vector[mapping[0]] for state in track], + [state.state_vector[mapping[1]] for state in track], + **tracks_kwargs) + track_colors[track] = plt.getp(line[0], 'color') + + # Assuming a single track or all plotted as the same colour then the following will work. + # Otherwise will just render the final track colour. + tracks_kwargs['color'] = plt.getp(line[0], 'color') + + # Generate legend items for track + track_handle = Line2D([], [], linestyle=tracks_kwargs['linestyle'], + marker=tracks_kwargs['marker'], color=tracks_kwargs['color']) + self.handles_list.append(track_handle) + self.labels_list.append(track_label) + + if uncertainty: + # Plot uncertainty ellipses + for track in tracks: + HH = np.eye(track.ndim)[mapping, :] # Get position mapping matrix + for state in track: + w, v = np.linalg.eig(HH @ state.covar @ HH.T) + max_ind = np.argmax(w) + min_ind = np.argmin(w) + orient = np.arctan2(v[1, max_ind], v[0, max_ind]) + ellipse = Ellipse(xy=state.state_vector[mapping[:2], 0], + width=2 * np.sqrt(w[max_ind]), + height=2 * np.sqrt(w[min_ind]), + angle=np.rad2deg(orient), alpha=0.2, + color=track_colors[track]) + self.ax.add_artist(ellipse) + + # Generate legend items for uncertainty ellipses + ellipse_handle = Ellipse((0.5, 0.5), 0.5, 0.5, alpha=0.2, color=tracks_kwargs['color']) + ellipse_label = "Uncertainty" + + self.handles_list.append(ellipse_handle) + self.labels_list.append(ellipse_label) + + # Generate legend + self.ax.legend(handles=self.handles_list, labels=self.labels_list, + handler_map={Ellipse: _HandlerEllipse()}) + + elif particle: + # Plot particles + for track in tracks: + for state in track: + data = state.particles.state_vector[mapping[:2], :] + self.ax.plot(data[0], data[1], linestyle='', marker=".", + markersize=1, alpha=0.5) + + # Generate legend items for particles + particle_handle = Line2D([], [], linestyle='', color="black", marker='.', markersize=1) + particle_label = "Particles" + self.handles_list.append(particle_handle) + self.labels_list.append(particle_label) + + # Generate legend + self.ax.legend(handles=self.handles_list, labels=self.labels_list) + + else: + self.ax.legend(handles=self.handles_list, labels=self.labels_list) + + # Ellipse legend patch (used in Tutorial 3) + @staticmethod + def ellipse_legend(ax, label_list, color_list, **kwargs): + """Adds an ellipse patch to the legend on the axes. One patch added for each item in + `label_list` with the corresponding color from `color_list`. + + Parameters + ---------- + ax : matplotlib.axes.Axes + Looks at the plot axes defined + label_list : list of str + Takes in list of strings intended to label ellipses in legend + color_list : list of str + Takes in list of colors corresponding to string/label + Must be the same length as label_list + \\*\\*kwargs: dict + Additional arguments to be passed to plot function. Default is ``alpha=0.2``. 
+ """ + + ellipse_kwargs = dict(alpha=0.2) + ellipse_kwargs.update(kwargs) + + legend = ax.legend(handler_map={Ellipse: _HandlerEllipse()}) + handles, labels = ax.get_legend_handles_labels() + for color in color_list: + handle = Ellipse((0.5, 0.5), 0.5, 0.5, color=color, **ellipse_kwargs) + handles.append(handle) + for label in label_list: + labels.append(label) + legend._legend_box = None + legend._init_legend_box(handles, labels) + legend._set_loc(legend._loc) + legend.set_title(legend.get_title().get_text()) + + +class _HandlerEllipse(HandlerPatch): + def create_artists(self, legend, orig_handle, + xdescent, ydescent, width, height, fontsize, trans): + center = 0.5*width - 0.5*xdescent, 0.5*height - 0.5*ydescent + p = Ellipse(xy=center, width=width + xdescent, + height=height + ydescent) + self.update_prop(p, orig_handle, legend) + p.set_transform(trans) + return [p] From 73bb667ef50c24ed11a72dc2f4b246a9bc8aa381 Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Mon, 22 Nov 2021 15:56:32 -0500 Subject: [PATCH 02/15] Measurement & Track plotting support added to plotter3.py Can now plot both measurements and tracks in 3D, functionality & use is identical to plotter.py to maintain consistency in implementation. Todo - add error bars to legend, consider alternate ways to display track uncertainty --- stonesoup/plotter3.py | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/stonesoup/plotter3.py b/stonesoup/plotter3.py index 7387060a7..1b5aa0ce2 100644 --- a/stonesoup/plotter3.py +++ b/stonesoup/plotter3.py @@ -41,7 +41,7 @@ class Plotter: def __init__(self): # Generate plot axes self.fig = plt.figure(figsize=(10, 6)) - self.ax = plt.axes(projection='3d') + self.ax = self.fig.add_subplot(111, projection='3d') self.ax.set_xlabel("$x$") self.ax.set_ylabel("$y$") self.ax.set_zlabel("$z$") @@ -132,7 +132,7 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if meas_model is None: meas_model = measurement_model # measurement_model from input - if isinstance(meas_model, LinearModel): + if isinstance(meas_model, LinearModel): # check to see if meas_model is a LinearModel type model_matrix = meas_model.matrix() inv_model_matrix = np.linalg.pinv(model_matrix) state_vec = inv_model_matrix @ state.state_vector @@ -161,7 +161,7 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if plot_detections: detection_array = np.array(plot_detections) - self.ax.scatter(detection_array[:, 0], detection_array[:, 1], **measurement_kwargs) + self.ax.scatter(detection_array[:, 0], detection_array[:, 1], detection_array[:, 2], **measurement_kwargs) measurements_handle = Line2D([], [], linestyle='', **measurement_kwargs) # Generate legend items for measurements @@ -170,7 +170,7 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if plot_clutter: clutter_array = np.array(plot_clutter) - self.ax.scatter(clutter_array[:, 0], clutter_array[:, 1], color='y', marker='2') + self.ax.scatter(clutter_array[:, 0], clutter_array[:, 1], clutter_array[:, 2], color='y', marker='2') clutter_handle = Line2D([], [], linestyle='', marker='2', color='y') clutter_label = "Clutter" @@ -221,6 +221,7 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ for track in tracks: line = self.ax.plot([state.state_vector[mapping[0]] for state in track], [state.state_vector[mapping[1]] for state in track], + [state.state_vector[mapping[2]] for state in 
From b9a5b597d584e44e03d82812f63b3600021ed7aa Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Fri, 26 Nov 2021 14:40:37 -0500 Subject: [PATCH 03/15] plotter3.py error/legend update -*NEW* Added 3D error bars as an option for plot_tracks() -*NEW* err_freq is an argument of plot_tracks() that defines how frequently error bars are displayed along the track (default is 1, i.e. at every point) -labels_list and handles_list removed, replaced by legend_dict. --legend_dict holds each *_handle with the corresponding *_label as its key -legend is now plotted with legend_dict.values() as the handles and legend_dict.keys() as the labels; this avoids duplicate legend entries when plotting the same data multiple times. If similar elements should appear separately in the legend, a unique *_label must be passed into the appropriate plot function *TODO* -Redo header documentation for the Plotter class and its methods -Add buttons to make plots interactive (turning on and off measurements, tracks, truth, etc.). 
See https://matplotlib.org/stable/gallery/widgets/check_buttons.html -Allow user to plot tracks and truths with unique legend identifiers WITHOUT having to call plot_tracks() for each unique element; list of labels same length as list of tracks/truth --- stonesoup/plotter3.py | 82 +++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 53 deletions(-) diff --git a/stonesoup/plotter3.py b/stonesoup/plotter3.py index 1b5aa0ce2..39b7aee35 100644 --- a/stonesoup/plotter3.py +++ b/stonesoup/plotter3.py @@ -47,9 +47,9 @@ def __init__(self): self.ax.set_zlabel("$z$") self.ax.axis('auto') - # Create empty lists for legend handles and labels - self.handles_list = [] - self.labels_list = [] + # Create empty dictionary for legend handles and labels - dict used to + # prevent multiple entries with the same label from displaying on legend + self.legend_dict = {} # create an empty dictionary to hold legend entries def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwargs): """Plots ground truth(s) @@ -85,11 +85,9 @@ def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwa # Generate legend items truths_handle = Line2D([], [], linestyle=truths_kwargs['linestyle'], color='black') - self.handles_list.append(truths_handle) - self.labels_list.append(truths_label) - + self.legend_dict[truths_label] = truths_handle # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) def plot_measurements(self, measurements, mapping, measurement_model=None, measurements_label="Measurements", **kwargs): @@ -165,8 +163,7 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, measurements_handle = Line2D([], [], linestyle='', **measurement_kwargs) # Generate legend items for measurements - self.handles_list.append(measurements_handle) - self.labels_list.append(measurements_label) + self.legend_dict[measurements_label] = measurements_handle if plot_clutter: clutter_array = np.array(plot_clutter) @@ -175,13 +172,12 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, clutter_label = "Clutter" # Generate legend items for clutter - self.handles_list.append(clutter_handle) - self.labels_list.append(clutter_label) + self.legend_dict[clutter_label] = clutter_handle # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) - def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_label="Track", + def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_label="Track", err_freq=1, **kwargs): """Plots track(s) @@ -232,48 +228,29 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ # Generate legend items for track track_handle = Line2D([], [], linestyle=tracks_kwargs['linestyle'], marker=tracks_kwargs['marker'], color=tracks_kwargs['color']) - self.handles_list.append(track_handle) - self.labels_list.append(track_label) + self.legend_dict[track_label] = track_handle if uncertainty: # Plot uncertainty ellipses for track in tracks: HH = np.eye(track.ndim)[mapping, :] # Get position mapping matrix + check = err_freq for state in track: - w, v = np.linalg.eig(HH @ state.covar @ HH.T) - max_ind = np.argmax(w) - min_ind = np.argmin(w) - orient = np.arctan2(v[1, max_ind], v[0, max_ind]) - - xl = state.state_vector[mapping[0]] 
- yl = state.state_vector[mapping[1]] - zl = state.state_vector[mapping[2]] - - x_err = w[0] - y_err = w[1] - z_err = w[2] - - self.ax.plot3D([xl+x_err, xl-x_err], [yl, yl], [zl, zl], marker="_", color=tracks_kwargs['color']) - self.ax.plot3D([xl, xl], [yl+y_err, yl-y_err], [zl, zl], marker="_", color=tracks_kwargs['color']) - self.ax.plot3D([xl, xl], [yl, yl], [zl+z_err, zl-z_err], marker="_", color=tracks_kwargs['color']) - - '''ellipse = Ellipse(xy=state.state_vector[mapping[:2], 0], - width=2 * np.sqrt(w[max_ind]), - height=2 * np.sqrt(w[min_ind]), - angle=np.rad2deg(orient), alpha=0.2, - color=track_colors[track]) - #self.ax.add_artist(ellipse)''' - - # Generate legend items for uncertainty ellipses - '''ellipse_handle = Ellipse((0.5, 0.5), 0.5, 0.5, alpha=0.2, color=tracks_kwargs['color']) - ellipse_label = "Uncertainty" - - self.handles_list.append(ellipse_handle) - self.labels_list.append(ellipse_label) - - # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list, - handler_map={Ellipse: _HandlerEllipse()})''' + if not check % err_freq: + w, v = np.linalg.eig(HH @ state.covar @ HH.T) + + xl = state.state_vector[mapping[0]] + yl = state.state_vector[mapping[1]] + zl = state.state_vector[mapping[2]] + + x_err = w[0] + y_err = w[1] + z_err = w[2] + + self.ax.plot3D([xl+x_err, xl-x_err], [yl, yl], [zl, zl], marker="_", color=tracks_kwargs['color']) + self.ax.plot3D([xl, xl], [yl+y_err, yl-y_err], [zl, zl], marker="_", color=tracks_kwargs['color']) + self.ax.plot3D([xl, xl], [yl, yl], [zl+z_err, zl-z_err], marker="_", color=tracks_kwargs['color']) + check += 1 elif particle: # Plot particles @@ -286,14 +263,13 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ # Generate legend items for particles particle_handle = Line2D([], [], linestyle='', color="black", marker='.', markersize=1) particle_label = "Particles" - self.handles_list.append(particle_handle) - self.labels_list.append(particle_label) + self.legend_dict[particle_label] = particle_handle # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) else: - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) # Ellipse legend patch (used in Tutorial 3) @staticmethod From 9ed9d3651d3e3d8ac144c444ebbc6948e93d198a Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Tue, 4 Jan 2022 10:25:21 -0500 Subject: [PATCH 04/15] Added legend entry for error bars --- stonesoup/plotter3.py | 1 + 1 file changed, 1 insertion(+) diff --git a/stonesoup/plotter3.py b/stonesoup/plotter3.py index 39b7aee35..6bdf8d413 100644 --- a/stonesoup/plotter3.py +++ b/stonesoup/plotter3.py @@ -251,6 +251,7 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ self.ax.plot3D([xl, xl], [yl+y_err, yl-y_err], [zl, zl], marker="_", color=tracks_kwargs['color']) self.ax.plot3D([xl, xl], [yl, yl], [zl+z_err, zl-z_err], marker="_", color=tracks_kwargs['color']) check += 1 + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) elif particle: # Plot particles From 76fc37446d157de3300ce14c05e09ff3b05cb9c9 Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Tue, 4 Jan 2022 15:28:21 -0500 Subject: [PATCH 05/15] Refactored to meet PEP8 standards Using flake8 
python library --- stonesoup/plotter3.py | 135 ++++++++++++++---------------------------- 1 file changed, 43 insertions(+), 92 deletions(-) diff --git a/stonesoup/plotter3.py b/stonesoup/plotter3.py index 6bdf8d413..4bf322f20 100644 --- a/stonesoup/plotter3.py +++ b/stonesoup/plotter3.py @@ -7,24 +7,23 @@ import warnings from itertools import chain -from mpl_toolkits import mplot3d # new as of plotter3 +from mpl_toolkits import mplot3d # new as of plotter3 import numpy as np from matplotlib import pyplot as plt from matplotlib.lines import Line2D -from matplotlib.patches import Ellipse -from matplotlib.legend_handler import HandlerPatch from .types import detection from .models.base import LinearModel, NonLinearModel class Plotter: - """Plotting class for building graphs of Stone Soup simulations + """3D plotting class for building 3D visualizations of Stone Soup simulations A plotting class which is used to simplify the process of plotting ground truths, - measurements, clutter and tracks. Tracks can be plotted with uncertainty ellipses or - particles if required. Legends are automatically generated with each plot. + measurements, clutter and tracks with information in three spatial dimensions. + Tracks can be plotted with uncertainty bars at specified intervals if required. + Legends are automatically generated with each plot. Attributes ---------- @@ -32,10 +31,9 @@ class Plotter: Generated figure for graphs to be plotted on ax: matplotlib.axes.Axes Generated axes for graphs to be plotted on - handles_list: list of :class:`matplotlib.legend_handler.HandlerBase` - A list of generated legend handles - labels_list: list of str - A list of generated legend labels + legend_dict: dict + Dictionary of legend handles as :class:`matplotlib.legend_handler.HandlerBase` + and labels as str """ def __init__(self): @@ -67,7 +65,8 @@ def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwa :class:`~.GroundTruthPath` type, the argument is modified to be a set to allow for iteration. mapping: list - List of 2 items specifying the mapping of the x and y components of the state space. + List of 3 items specifying the mapping of the x, y, and z position + components of the state space. \\*\\*kwargs: dict Additional arguments to be passed to plot function. Default is ``linestyle="--"``. """ @@ -79,9 +78,9 @@ def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwa for truth in truths: self.ax.plot3D([state.state_vector[mapping[0]] for state in truth], - [state.state_vector[mapping[1]] for state in truth], - [state.state_vector[mapping[2]] for state in truth], - **truths_kwargs) + [state.state_vector[mapping[1]] for state in truth], + [state.state_vector[mapping[2]] for state in truth], + **truths_kwargs) # Generate legend items truths_handle = Line2D([], [], linestyle=truths_kwargs['linestyle'], color='black') @@ -105,7 +104,8 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, measurements : list of :class:`~.Detection` Detections which will be plotted. If measurements is a set of lists it is flattened. mapping: list - List of 2 items specifying the mapping of the x and y components of the state space. + List of 3 items specifying the mapping of the x, y, and z position + components of the state space. measurement_model : :class:`~.Model`, optional User-defined measurement model to be used in finding measurement state inverses if they cannot be found from the measurements themselves. 
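Note: the plot_measurements hunk below keeps the pattern of mapping each detection back into state space before plotting; for a linear measurement model this is done by pseudo-inverting the model matrix with np.linalg.pinv. A minimal standalone sketch of that idea, assuming an illustrative 2D position-only measurement of a 4D [x, vx, y, vy] state (the matrix and values are examples, not taken from the patch):

    import numpy as np

    # Assumed measurement matrix H observing only the position components
    H = np.array([[1., 0., 0., 0.],
                  [0., 0., 1., 0.]])

    z = np.array([[5.0], [3.0]])       # one detection in measurement space
    state_vec = np.linalg.pinv(H) @ z  # least-squares pre-image of the detection

    # Observed components survive the round trip; unobserved ones come back as 0,
    # which is harmless here because only the mapped position entries are plotted.
    print(state_vec.ravel())           # [5. 0. 3. 0.]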
@@ -130,7 +130,7 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if meas_model is None: meas_model = measurement_model # measurement_model from input - if isinstance(meas_model, LinearModel): # check to see if meas_model is a LinearModel type + if isinstance(meas_model, LinearModel): # meas_model is a LinearModel type model_matrix = meas_model.matrix() inv_model_matrix = np.linalg.pinv(model_matrix) state_vec = inv_model_matrix @ state.state_vector @@ -159,7 +159,9 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if plot_detections: detection_array = np.array(plot_detections) - self.ax.scatter(detection_array[:, 0], detection_array[:, 1], detection_array[:, 2], **measurement_kwargs) + self.ax.scatter(detection_array[:, 0], + detection_array[:, 1], + detection_array[:, 2], **measurement_kwargs) measurements_handle = Line2D([], [], linestyle='', **measurement_kwargs) # Generate legend items for measurements @@ -167,7 +169,9 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if plot_clutter: clutter_array = np.array(plot_clutter) - self.ax.scatter(clutter_array[:, 0], clutter_array[:, 1], clutter_array[:, 2], color='y', marker='2') + self.ax.scatter(clutter_array[:, 0], + clutter_array[:, 1], + clutter_array[:, 2], color='y', marker='2') clutter_handle = Line2D([], [], linestyle='', marker='2', color='y') clutter_label = "Clutter" @@ -177,16 +181,17 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, # Generate legend self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) - def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_label="Track", err_freq=1, + def plot_tracks(self, tracks, mapping, uncertainty=False, track_label="Track", err_freq=1, **kwargs): """Plots track(s) Plots each track generated, generating a legend automatically. If ``uncertainty=True``, - uncertainty ellipses are plotted. If ``particle=True``, particles are plotted. - Tracks are plotted as solid lines with point markers and default colors. - Uncertainty ellipses are plotted with a default color which is the same for all tracks. + uncertainty bars are plotted every :attr:`err_freq` measurements; the default + plots uncertainty bars at every track step. Tracks are plotted as solid + lines with point markers and default colors. Uncertainty bars are plotted + with a default color which is the same for all tracks. - Users can change linestyle, color and marker using keyword arguments. Uncertainty ellipses + Users can change linestyle, color and marker using keyword arguments. Uncertainty bars will also be plotted with the user defined colour and any changes will apply to all tracks. Parameters ---------- tracks : set of :class:`~.Track` Set of tracks which will be plotted. If not a set, and instead a single :class:`~.Track` type, the argument is modified to be a set to allow for iteration. mapping: list - List of 2 items specifying the mapping of the x and y components of the state space. + List of 3 items specifying the mapping of the x, y, and z position + components of the state space. uncertainty : bool - If True, function plots uncertainty ellipses. + If True, function plots uncertainty bars in x, y, and z. particle : bool If True, function plots particles. track_label: str Label to apply to all tracks for legend. + err_freq: int + Frequency of error bar plotting on tracks.
Default value is 1, meaning + error bars are plotted at every track step. \\*\\*kwargs: dict Additional arguments to be passed to plot function. Defaults are ``linestyle="-"``, ``marker='.'`` and ``color=None``. @@ -238,81 +247,23 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ for state in track: if not check % err_freq: w, v = np.linalg.eig(HH @ state.covar @ HH.T) - + xl = state.state_vector[mapping[0]] yl = state.state_vector[mapping[1]] zl = state.state_vector[mapping[2]] - + x_err = w[0] y_err = w[1] z_err = w[2] - - self.ax.plot3D([xl+x_err, xl-x_err], [yl, yl], [zl, zl], marker="_", color=tracks_kwargs['color']) - self.ax.plot3D([xl, xl], [yl+y_err, yl-y_err], [zl, zl], marker="_", color=tracks_kwargs['color']) - self.ax.plot3D([xl, xl], [yl, yl], [zl+z_err, zl-z_err], marker="_", color=tracks_kwargs['color']) - check += 1 - self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) - - elif particle: - # Plot particles - for track in tracks: - for state in track: - data = state.particles.state_vector[mapping[:2], :] - self.ax.plot(data[0], data[1], linestyle='', marker=".", - markersize=1, alpha=0.5) - - # Generate legend items for particles - particle_handle = Line2D([], [], linestyle='', color="black", marker='.', markersize=1) - particle_label = "Particles" - self.legend_dict[particle_label] = particle_handle - # Generate legend + self.ax.plot3D([xl+x_err, xl-x_err], [yl, yl], [zl, zl], + marker="_", color=tracks_kwargs['color']) + self.ax.plot3D([xl, xl], [yl+y_err, yl-y_err], [zl, zl], + marker="_", color=tracks_kwargs['color']) + self.ax.plot3D([xl, xl], [yl, yl], [zl+z_err, zl-z_err], + marker="_", color=tracks_kwargs['color']) + check += 1 self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) else: self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) - - # Ellipse legend patch (used in Tutorial 3) - @staticmethod - def ellipse_legend(ax, label_list, color_list, **kwargs): - """Adds an ellipse patch to the legend on the axes. One patch added for each item in - `label_list` with the corresponding color from `color_list`. - - Parameters - ---------- - ax : matplotlib.axes.Axes - Looks at the plot axes defined - label_list : list of str - Takes in list of strings intended to label ellipses in legend - color_list : list of str - Takes in list of colors corresponding to string/label - Must be the same length as label_list - \\*\\*kwargs: dict - Additional arguments to be passed to plot function. Default is ``alpha=0.2``. 
- """ - - ellipse_kwargs = dict(alpha=0.2) - ellipse_kwargs.update(kwargs) - - legend = ax.legend(handler_map={Ellipse: _HandlerEllipse()}) - handles, labels = ax.get_legend_handles_labels() - for color in color_list: - handle = Ellipse((0.5, 0.5), 0.5, 0.5, color=color, **ellipse_kwargs) - handles.append(handle) - for label in label_list: - labels.append(label) - legend._legend_box = None - legend._init_legend_box(handles, labels) - legend._set_loc(legend._loc) - legend.set_title(legend.get_title().get_text()) - - -class _HandlerEllipse(HandlerPatch): - def create_artists(self, legend, orig_handle, - xdescent, ydescent, width, height, fontsize, trans): - center = 0.5*width - 0.5*xdescent, 0.5*height - 0.5*ydescent - p = Ellipse(xy=center, width=width + xdescent, - height=height + ydescent) - self.update_prop(p, orig_handle, legend) - p.set_transform(trans) - return [p] From abf46adde3c65490cea813c4dcb9b5ff1a20595f Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Thu, 13 Jan 2022 16:33:25 -0500 Subject: [PATCH 06/15] Combined 2D/3D plotting functionality to plotter.py plotter.py now contains all functionality of original 2D plotter.py and 3D plotting of plotter3.py. Plotting type is now chosen by the user by using the optional Enumerate parameter "dimension" when instantiating Plotter(). Documentation has been updated to reflect these changes TODO -Add unit tests -Use flake8 to ensure the file complies with PEP8 standards -Submit pull request on Stone Soup Github repo --- stonesoup/plotter.py | 219 +++++++++++++++++++++++++++---------------- 1 file changed, 136 insertions(+), 83 deletions(-) diff --git a/stonesoup/plotter.py b/stonesoup/plotter.py index dbfb6eb66..850e578a8 100644 --- a/stonesoup/plotter.py +++ b/stonesoup/plotter.py @@ -10,6 +10,14 @@ from .types import detection from .models.base import LinearModel, NonLinearModel +from enum import Enum + +class Dimension(Enum): + """Dimension Enum class for specifying plotting parameters in the Plotter class. + Used to sanitize inputs for the dimension attribute of Plotter(). + """ + TWO = '2D' # 2D plotting mode (original plotter.py functionality) + THREE = '3D' # 3D plotting mode class Plotter: """Plotting class for building graphs of Stone Soup simulations @@ -17,6 +25,7 @@ class Plotter: A plotting class which is used to simplify the process of plotting ground truths, measurements, clutter and tracks. Tracks can be plotted with uncertainty ellipses or particles if required. Legends are automatically generated with each plot. + Three dimensional plots can be created using the optional dimension parameter. 
Attributes ---------- @@ -24,23 +33,29 @@ class Plotter: Generated figure for graphs to be plotted on ax: matplotlib.axes.Axes Generated axes for graphs to be plotted on - handles_list: list of :class:`matplotlib.legend_handler.HandlerBase` - A list of generated legend handles - labels_list: list of str - A list of generated legend labels + legend_dict: dict + Dictionary of legend handles as :class:`matplotlib.legend_handler.HandlerBase` + and labels as str """ - def __init__(self): + def __init__(self, dimension = Dimension.TWO): + self.dimension = dimension # Generate plot axes self.fig = plt.figure(figsize=(10, 6)) - self.ax = self.fig.add_subplot(1, 1, 1) + if self.dimension is Dimension.TWO: # 2D axes + self.ax = self.fig.add_subplot(1, 1, 1) + self.ax.axis('equal') + else: # 3D axes + self.ax = self.fig.add_subplot(111, projection='3d') + self.ax.axis('auto') + self.ax.set_zlabel("$z$") self.ax.set_xlabel("$x$") self.ax.set_ylabel("$y$") - self.ax.axis('equal') - # Create empty lists for legend handles and labels - self.handles_list = [] - self.labels_list = [] + # Create empty dictionary for legend handles and labels - dict used to + # prevent multiple entries with the same label from displaying on legend + # This is new compared to plotter.py + self.legend_dict = {} # create an empty dictionary to hold legend entries def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwargs): """Plots ground truth(s) @@ -58,7 +73,7 @@ def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwa :class:`~.GroundTruthPath` type, the argument is modified to be a set to allow for iteration. mapping: list - List of 2 items specifying the mapping of the x and y components of the state space. + List of items specifying the mapping of the position components of the state space. \\*\\*kwargs: dict Additional arguments to be passed to plot function. Default is ``linestyle="--"``. """ @@ -69,17 +84,20 @@ def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwa truths = {truths} # Make a set of length 1 for truth in truths: - self.ax.plot([state.state_vector[mapping[0]] for state in truth], - [state.state_vector[mapping[1]] for state in truth], - **truths_kwargs) - + if self.dimension is Dimension.TWO: # plots the ground truths in xy + self.ax.plot([state.state_vector[mapping[0]] for state in truth], + [state.state_vector[mapping[1]] for state in truth], + **truths_kwargs) + else: # plots the ground truths in xyz + self.ax.plot3D([state.state_vector[mapping[0]] for state in truth], + [state.state_vector[mapping[1]] for state in truth], + [state.state_vector[mapping[2]] for state in truth], + **truths_kwargs) # Generate legend items truths_handle = Line2D([], [], linestyle=truths_kwargs['linestyle'], color='black') - self.handles_list.append(truths_handle) - self.labels_list.append(truths_label) - + self.legend_dict[truths_label] = truths_handle # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) def plot_measurements(self, measurements, mapping, measurement_model=None, measurements_label="Measurements", **kwargs): @@ -97,7 +115,7 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, measurements : list of :class:`~.Detection` Detections which will be plotted. If measurements is a set of lists it is flattened. 
mapping: list - List of 2 items specifying the mapping of the x and y components of the state space. + List of items specifying the mapping of the position components of the state space. measurement_model : :class:`~.Model`, optional User-defined measurement model to be used in finding measurement state inverses if they cannot be found from the measurements themselves. @@ -151,36 +169,38 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if plot_detections: detection_array = np.array(plot_detections) - self.ax.scatter(detection_array[:, 0], detection_array[:, 1], **measurement_kwargs) + # *detection_array.T unpacks detection_array by columns + # (same as passing in detection_array[:,0], detection_array[:,1], etc...) + self.ax.scatter(*detection_array.T, **measurement_kwargs) measurements_handle = Line2D([], [], linestyle='', **measurement_kwargs) # Generate legend items for measurements - self.handles_list.append(measurements_handle) - self.labels_list.append(measurements_label) + self.legend_dict[measurements_label] = measurements_handle if plot_clutter: clutter_array = np.array(plot_clutter) - self.ax.scatter(clutter_array[:, 0], clutter_array[:, 1], color='y', marker='2') + self.ax.scatter(*clutter_array.T, color='y', marker='2') clutter_handle = Line2D([], [], linestyle='', marker='2', color='y') clutter_label = "Clutter" # Generate legend items for clutter - self.handles_list.append(clutter_handle) - self.labels_list.append(clutter_label) + self.legend_dict[clutter_label] = clutter_handle # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_label="Track", - **kwargs): + err_freq=1, **kwargs): """Plots track(s) - Plots each track generated, generating a legend automatically. If ``uncertainty=True``, - uncertainty ellipses are plotted. If ``particle=True``, particles are plotted. - Tracks are plotted as solid lines with point markers and default colors. - Uncertainty ellipses are plotted with a default color which is the same for all tracks. + Plots each track generated, generating a legend automatically. If ``uncertainty=True`` + and the plot is 2D, error ellipses are plotted. If the plot is in + 3D, uncertainty bars are plotted every :attr:`err_freq` measurements; the default + plots uncertainty bars at every track step. Tracks are plotted as solid + lines with point markers and default colors. Uncertainty bars are plotted + with a default color which is the same for all tracks. - Users can change linestyle, color and marker using keyword arguments. Uncertainty ellipses + Users can change linestyle, color and marker using keyword arguments. Uncertainty bars will also be plotted with the user defined colour and any changes will apply to all tracks. Parameters ---------- @@ -189,13 +209,17 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ Set of tracks which will be plotted. If not a set, and instead a single :class:`~.Track` type, the argument is modified to be a set to allow for iteration. mapping: list - List of 2 items specifying the mapping of the x and y components of the state space. + List of 2 or 3 items specifying the mapping of the position + components of the state space. uncertainty : bool - If True, function plots uncertainty ellipses. + If True, function plots uncertainty ellipses (2D) or bars in x, y, and z (3D).
particle : bool If True, function plots particles. track_label: str Label to apply to all tracks for legend. + err_freq: int + Frequency of error bar plotting on tracks. Default value is 1, meaning + error bars are plotted at every track step. \\*\\*kwargs: dict Additional arguments to be passed to plot function. Defaults are ``linestyle="-"``, ``marker='.'`` and ``color=None``. @@ -209,9 +233,15 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ # Plot tracks track_colors = {} for track in tracks: - line = self.ax.plot([state.state_vector[mapping[0]] for state in track], - [state.state_vector[mapping[1]] for state in track], - **tracks_kwargs) + if self.dimension is Dimension.TWO: + line = self.ax.plot([state.state_vector[mapping[0]] for state in track], + [state.state_vector[mapping[1]] for state in track], + **tracks_kwargs) + else: + line = self.ax.plot([state.state_vector[mapping[0]] for state in track], + [state.state_vector[mapping[1]] for state in track], + [state.state_vector[mapping[2]] for state in track], + **tracks_kwargs) track_colors[track] = plt.getp(line[0], 'color') # Assuming a single track or all plotted as the same colour then the following will work. @@ -221,55 +251,78 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ # Generate legend items for track track_handle = Line2D([], [], linestyle=tracks_kwargs['linestyle'], marker=tracks_kwargs['marker'], color=tracks_kwargs['color']) - self.handles_list.append(track_handle) - self.labels_list.append(track_label) - + self.legend_dict[track_label] = track_handle if uncertainty: - # Plot uncertainty ellipses - for track in tracks: - HH = np.eye(track.ndim)[mapping, :] # Get position mapping matrix - for state in track: - w, v = np.linalg.eig(HH @ state.covar @ HH.T) - max_ind = np.argmax(w) - min_ind = np.argmin(w) - orient = np.arctan2(v[1, max_ind], v[0, max_ind]) - ellipse = Ellipse(xy=state.state_vector[mapping[:2], 0], - width=2 * np.sqrt(w[max_ind]), - height=2 * np.sqrt(w[min_ind]), - angle=np.rad2deg(orient), alpha=0.2, - color=track_colors[track]) - self.ax.add_artist(ellipse) - - # Generate legend items for uncertainty ellipses - ellipse_handle = Ellipse((0.5, 0.5), 0.5, 0.5, alpha=0.2, color=tracks_kwargs['color']) - ellipse_label = "Uncertainty" - - self.handles_list.append(ellipse_handle) - self.labels_list.append(ellipse_label) - - # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list, - handler_map={Ellipse: _HandlerEllipse()}) + if self.dimension is Dimension.TWO: + # Plot uncertainty ellipses + for track in tracks: + HH = np.eye(track.ndim)[mapping, :] # Get position mapping matrix + for state in track: + w, v = np.linalg.eig(HH @ state.covar @ HH.T) + max_ind = np.argmax(w) + min_ind = np.argmin(w) + orient = np.arctan2(v[1, max_ind], v[0, max_ind]) + ellipse = Ellipse(xy=state.state_vector[mapping[:2], 0], + width=2 * np.sqrt(w[max_ind]), + height=2 * np.sqrt(w[min_ind]), + angle=np.rad2deg(orient), alpha=0.2, + color=track_colors[track]) + self.ax.add_artist(ellipse) + + # Generate legend items for uncertainty ellipses + ellipse_handle = Ellipse((0.5, 0.5), 0.5, 0.5, alpha=0.2, color=tracks_kwargs['color']) + ellipse_label = "Uncertainty" + self.legend_dict[ellipse_label] = ellipse_handle + # Generate legend + self.ax.legend(handles=self.legend_dict.values(), + labels=self.legend_dict.keys(), + handler_map={Ellipse: _HandlerEllipse()}) + else: + # Plot 3D error bars on tracks + for track in tracks: + HH = 
np.eye(track.ndim)[mapping, :] # Get position mapping matrix + check = err_freq + for state in track: + if not check % err_freq: + w, v = np.linalg.eig(HH @ state.covar @ HH.T) + + xl = state.state_vector[mapping[0]] + yl = state.state_vector[mapping[1]] + zl = state.state_vector[mapping[2]] + + x_err = w[0] + y_err = w[1] + z_err = w[2] + + self.ax.plot3D([xl+x_err, xl-x_err], [yl, yl], [zl, zl], + marker="_", color=tracks_kwargs['color']) + self.ax.plot3D([xl, xl], [yl+y_err, yl-y_err], [zl, zl], + marker="_", color=tracks_kwargs['color']) + self.ax.plot3D([xl, xl], [yl, yl], [zl+z_err, zl-z_err], + marker="_", color=tracks_kwargs['color']) + check += 1 elif particle: - # Plot particles - for track in tracks: - for state in track: - data = state.particles.state_vector[mapping[:2], :] - self.ax.plot(data[0], data[1], linestyle='', marker=".", - markersize=1, alpha=0.5) - - # Generate legend items for particles - particle_handle = Line2D([], [], linestyle='', color="black", marker='.', markersize=1) - particle_label = "Particles" - self.handles_list.append(particle_handle) - self.labels_list.append(particle_label) - - # Generate legend - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + if self.dimension is Dimension.TWO: + # Plot particles + for track in tracks: + for state in track: + data = state.particles.state_vector[mapping[:2], :] + self.ax.plot(data[0], data[1], linestyle='', marker=".", + markersize=1, alpha=0.5) + + # Generate legend items for particles + particle_handle = Line2D([], [], linestyle='', color="black", marker='.', markersize=1) + particle_label = "Particles" + self.legend_dict[particle_label] = particle_handle + # Generate legend + self.ax.legend(handles=self.legend_dict.values(), + labels=self.legend_dict.keys()) #particle error legend + else: + warnings.warn('Particle plotting is not supported for 3D visualization') else: - self.ax.legend(handles=self.handles_list, labels=self.labels_list) + self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) # Ellipse legend patch (used in Tutorial 3) @staticmethod @@ -314,4 +367,4 @@ def create_artists(self, legend, orig_handle, height=height + ydescent) self.update_prop(p, orig_handle, legend) p.set_transform(trans) - return [p] + return [p] \ No newline at end of file From 6c07ca43c732ed117f1eaac7856627eb79afcbdb Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Fri, 14 Jan 2022 17:43:11 -0500 Subject: [PATCH 07/15] plotter.py and test_plotter.py unit tests/flake8 Ready to PR --- stonesoup/plotter.py | 49 ++++++++++---- stonesoup/tests/test_plotter.py | 110 ++++++++++++++++++++++++++++++++ 2 files changed, 146 insertions(+), 13 deletions(-) create mode 100644 stonesoup/tests/test_plotter.py diff --git a/stonesoup/plotter.py b/stonesoup/plotter.py index 850e578a8..b6c911350 100644 --- a/stonesoup/plotter.py +++ b/stonesoup/plotter.py @@ -12,12 +12,21 @@ from enum import Enum + class Dimension(Enum): """Dimension Enum class for specifying plotting parameters in the Plotter class. Used to sanitize inputs for the dimension attribute of Plotter(). 
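Note: the constructor guard added below enforces that sanitization by raising a TypeError for anything that is not a Dimension member. A pytest-style sketch of the intended behaviour, mirroring the unit tests added later in this patch (the test name here is illustrative):

    import pytest
    from stonesoup.plotter import Plotter, Dimension

    def test_dimension_is_sanitized():
        # A bare int is not a Dimension member, so construction must fail
        with pytest.raises(TypeError):
            Plotter(dimension=2)
        # A proper enum member is accepted and stored as-is
        assert Plotter(dimension=Dimension.THREE).dimension is Dimension.THREE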
+ + Attributes + ---------- + TWO: int + Specifies 2D plotting for Plotter object + THREE: int + Specifies 3D plotting for Plotter object """ - TWO = '2D' # 2D plotting mode (original plotter.py functionality) - THREE = '3D' # 3D plotting mode + TWO = 2 # 2D plotting mode (original plotter.py functionality) + THREE = 3 # 3D plotting mode + class Plotter: """Plotting class for building graphs of Stone Soup simulations @@ -27,6 +36,11 @@ class Plotter: particles if required. Legends are automatically generated with each plot. Three dimensional plots can be created using the optional dimension parameter. + Parameters + ---------- + dimension: enum \'Dimension\' + Optional parameter to specify 2D or 3D plotting. Default is 2D plotting. + Attributes ---------- fig: matplotlib.figure.Figure @@ -38,8 +52,12 @@ class Plotter: and labels as str """ - def __init__(self, dimension = Dimension.TWO): - self.dimension = dimension + def __init__(self, dimension=Dimension.TWO): + if isinstance(dimension, type(Dimension.TWO)): + self.dimension = dimension + else: + raise TypeError("""%s is an unsupported type for \'dimension\'; + expected type %s""" % (type(dimension), type(Dimension.TWO))) # Generate plot axes self.fig = plt.figure(figsize=(10, 6)) if self.dimension is Dimension.TWO: # 2D axes @@ -88,11 +106,13 @@ def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwa self.ax.plot([state.state_vector[mapping[0]] for state in truth], [state.state_vector[mapping[1]] for state in truth], **truths_kwargs) - else: # plots the ground truths in xyz + elif self.dimension is Dimension.THREE: # plots the ground truths in xyz self.ax.plot3D([state.state_vector[mapping[0]] for state in truth], [state.state_vector[mapping[1]] for state in truth], [state.state_vector[mapping[2]] for state in truth], **truths_kwargs) + else: + raise NotImplementedError('Unsupported dimension type for truth plotting') # Generate legend items truths_handle = Line2D([], [], linestyle=truths_kwargs['linestyle'], color='black') self.legend_dict[truths_label] = truths_handle @@ -169,7 +189,7 @@ def plot_measurements(self, measurements, mapping, measurement_model=None, if plot_detections: detection_array = np.array(plot_detections) - # *detection_array.T unpacks detection_array by columns + # *detection_array.T unpacks detection_array by columns # (same as passing in detection_array[:,0], detection_array[:,1], etc...)
self.ax.scatter(*detection_array.T, **measurement_kwargs) measurements_handle = Line2D([], [], linestyle='', **measurement_kwargs) @@ -268,9 +288,10 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ angle=np.rad2deg(orient), alpha=0.2, color=track_colors[track]) self.ax.add_artist(ellipse) - + # Generate legend items for uncertainty ellipses - ellipse_handle = Ellipse((0.5, 0.5), 0.5, 0.5, alpha=0.2, color=tracks_kwargs['color']) + ellipse_handle = Ellipse((0.5, 0.5), 0.5, 0.5, alpha=0.2, + color=tracks_kwargs['color']) ellipse_label = "Uncertainty" self.legend_dict[ellipse_label] = ellipse_handle # Generate legend @@ -310,16 +331,18 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ data = state.particles.state_vector[mapping[:2], :] self.ax.plot(data[0], data[1], linestyle='', marker=".", markersize=1, alpha=0.5) - + # Generate legend items for particles - particle_handle = Line2D([], [], linestyle='', color="black", marker='.', markersize=1) + particle_handle = Line2D([], [], linestyle='', color="black", marker='.', + markersize=1) particle_label = "Particles" self.legend_dict[particle_label] = particle_handle # Generate legend self.ax.legend(handles=self.legend_dict.values(), - labels=self.legend_dict.keys()) #particle error legend + labels=self.legend_dict.keys()) # particle error legend else: - warnings.warn('Particle plotting is not supported for 3D visualization') + raise NotImplementedError("""Particle plotting is not currently supported for + 3D visualization""") else: self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) @@ -367,4 +390,4 @@ def create_artists(self, legend, orig_handle, height=height + ydescent) self.update_prop(p, orig_handle, legend) p.set_transform(trans) - return [p] \ No newline at end of file + return [p] diff --git a/stonesoup/tests/test_plotter.py b/stonesoup/tests/test_plotter.py new file mode 100644 index 000000000..e2eb330a9 --- /dev/null +++ b/stonesoup/tests/test_plotter.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +import numpy as np +from stonesoup.plotter import Plotter, Dimension +import pytest +import matplotlib.pyplot as plt + +# Setup simulation to test the plotter functionality +from datetime import datetime +from datetime import timedelta + +from stonesoup.types.detection import TrueDetection +from stonesoup.models.measurement.linear import LinearGaussian + +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState + +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.updater.kalman import KalmanUpdater + +from stonesoup.hypothesiser.distance import DistanceHypothesiser +from stonesoup.measures import Mahalanobis + +from stonesoup.dataassociator.neighbour import NearestNeighbour +from stonesoup.types.state import GaussianState + +from stonesoup.types.track import Track + +np.random.seed(1991) + +start_time = datetime.now() +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.005), + ConstantVelocity(0.005)]) +truth = GroundTruthPath([GroundTruthState([0, 1, 0, 1], timestamp=start_time)]) +for k in range(1, 21): + truth.append(GroundTruthState( + transition_model.function(truth[k-1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time+timedelta(seconds=k))) + +prob_det = 0.5 + +measurement_model = LinearGaussian( + ndim_state=4, + mapping=(0, 2), + 
noise_covar=np.array([[0.75, 0], + [0, 0.75]])) +all_measurements = [] +for state in truth: + measurement_set = set() + # Generate actual detection from the state with a 1-p_d chance that no detection is received. + if np.random.rand() <= prob_det: + measurement = measurement_model.function(state, noise=True) + measurement_set.add(TrueDetection(state_vector=measurement, + groundtruth_path=truth, + timestamp=state.timestamp, + measurement_model=measurement_model)) + + all_measurements.append(measurement_set) + +predictor = KalmanPredictor(transition_model) +updater = KalmanUpdater(measurement_model) +hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=3) +data_associator = NearestNeighbour(hypothesiser) + +# Run Kalman filter with data association +# Create prior +prior = GaussianState([[0], [1], [0], [1]], np.diag([1.5, 0.5, 1.5, 0.5]), timestamp=start_time) +track = Track([prior]) +for n, measurements in enumerate(all_measurements): + hypotheses = data_associator.associate([track], + measurements, + start_time + timedelta(seconds=n)) + hypothesis = hypotheses[track] # get the hypothesis for the specified track + + if hypothesis.measurement: + post = updater.update(hypothesis) + track.append(post) + else: # When data associator says no detections are good enough, we'll keep the prediction + track.append(hypothesis.prediction) + +plotter = Plotter() +# Test functions + + +def test_dimension_raise(): + with pytest.raises(TypeError): + Plotter(dimension=1) # expected to raise TypeError + + +def test_dimension_inlist(): # ensure dimension type is in predefined enum list + with pytest.raises(AttributeError): + Plotter(dimension=Dimension.TESTERROR) + + +def test_measurements_legend(): + plotter.plot_measurements(all_measurements, [0, 2]) # Measurements entry in legend dict + plt.close() + assert 'Measurements' in plotter.legend_dict + + +def test_measurement_clutter(): # no clutter should be plotted + plotter.plot_measurements(all_measurements, [0, 2]) + plt.close() + assert 'Clutter' not in plotter.legend_dict + + +def test_particle_3d(): # warning should arise if particle is attempted in 3d mode + plotter3 = Plotter(dimension=Dimension.THREE) + with pytest.raises(NotImplementedError): + plotter3.plot_tracks(track, [0, 1, 2], particle=True, uncertainty=False) From c313c20cf66030902ee1fe69984f62a9ca159b20 Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Mon, 17 Jan 2022 11:47:10 -0500 Subject: [PATCH 08/15] Delete plotter3.py Deprecated - functionality now contained fully within plotter.py --- stonesoup/plotter3.py | 269 ------------------------------------------ 1 file changed, 269 deletions(-) delete mode 100644 stonesoup/plotter3.py diff --git a/stonesoup/plotter3.py b/stonesoup/plotter3.py deleted file mode 100644 index 4bf322f20..000000000 --- a/stonesoup/plotter3.py +++ /dev/null @@ -1,269 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Nov 18 11:33:27 2021 - -@author: Peter -""" -import warnings -from itertools import chain - -from mpl_toolkits import mplot3d # new as of plotter3 - -import numpy as np -from matplotlib import pyplot as plt -from matplotlib.lines import Line2D - -from .types import detection -from .models.base import LinearModel, NonLinearModel - - -class Plotter: - """3D plotting class for building 3D visualizations of Stone Soup simulations - - A plotting class which is used to simplify the process of plotting ground truths, - measurements, clutter and tracks with information 
in three spatial dimensions. - Tracks can be plotted with uncertainty bars at specified intervals if required. - Legends are automatically generated with each plot. - - Attributes - ---------- - fig: matplotlib.figure.Figure - Generated figure for graphs to be plotted on - ax: matplotlib.axes.Axes - Generated axes for graphs to be plotted on - legend_dict: dict - Dictionary of legend handles as :class:`matplotlib.legend_handler.HandlerBase` - and labels as str - """ - - def __init__(self): - # Generate plot axes - self.fig = plt.figure(figsize=(10, 6)) - self.ax = self.fig.add_subplot(111, projection='3d') - self.ax.set_xlabel("$x$") - self.ax.set_ylabel("$y$") - self.ax.set_zlabel("$z$") - self.ax.axis('auto') - - # Create empty dictionary for legend handles and labels - dict used to - # prevent multiple entries with the same label from displaying on legend - self.legend_dict = {} # create an empty dictionary to hold legend entries - - def plot_ground_truths(self, truths, mapping, truths_label="Ground Truth", **kwargs): - """Plots ground truth(s) - - Plots each ground truth path passed in to :attr:`truths` and generates a legend - automatically. Ground truths are plotted as dashed lines with default colors. - - Users can change linestyle, color and marker using keyword arguments. Any changes - will apply to all ground truths. - - Parameters - ---------- - truths : set of :class:`~.GroundTruthPath` - Set of ground truths which will be plotted. If not a set, and instead a single - :class:`~.GroundTruthPath` type, the argument is modified to be a set to allow for - iteration. - mapping: list - List of 3 items specifying the mapping of the x, y, and z position - components of the state space. - \\*\\*kwargs: dict - Additional arguments to be passed to plot function. Default is ``linestyle="--"``. - """ - - truths_kwargs = dict(linestyle="--") - truths_kwargs.update(kwargs) - if not isinstance(truths, set): - truths = {truths} # Make a set of length 1 - - for truth in truths: - self.ax.plot3D([state.state_vector[mapping[0]] for state in truth], - [state.state_vector[mapping[1]] for state in truth], - [state.state_vector[mapping[2]] for state in truth], - **truths_kwargs) - - # Generate legend items - truths_handle = Line2D([], [], linestyle=truths_kwargs['linestyle'], color='black') - self.legend_dict[truths_label] = truths_handle - # Generate legend - self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) - - def plot_measurements(self, measurements, mapping, measurement_model=None, - measurements_label="Measurements", **kwargs): - """Plots measurements - - Plots detections and clutter, generating a legend automatically. Detections are plotted as - blue circles by default unless the detection type is clutter. - If the detection type is :class:`~.Clutter` it is plotted as a yellow 'tri-up' marker. - - Users can change the color and marker of detections using keyword arguments but not for - clutter detections. - - Parameters - ---------- - measurements : list of :class:`~.Detection` - Detections which will be plotted. If measurements is a set of lists it is flattened. - mapping: list - List of 3 items specifying the mapping of the x, y, and z position - components of the state space. - measurement_model : :class:`~.Model`, optional - User-defined measurement model to be used in finding measurement state inverses if - they cannot be found from the measurements themselves. - \\*\\*kwargs: dict - Additional arguments to be passed to plot function for detections. 
Defaults are - ``marker='o'`` and ``color='b'``. - """ - - measurement_kwargs = dict(marker='o', color='b') - measurement_kwargs.update(kwargs) - - if any(isinstance(item, set) for item in measurements): - measurements_set = chain.from_iterable(measurements) # Flatten into one set - else: - measurements_set = measurements - - plot_detections = [] - plot_clutter = [] - - for state in measurements_set: - meas_model = state.measurement_model # measurement_model from detections - if meas_model is None: - meas_model = measurement_model # measurement_model from input - - if isinstance(meas_model, LinearModel): # meas_model is a LinearModel type - model_matrix = meas_model.matrix() - inv_model_matrix = np.linalg.pinv(model_matrix) - state_vec = inv_model_matrix @ state.state_vector - - elif isinstance(meas_model, NonLinearModel): - try: - state_vec = meas_model.inverse_function(state) - except (NotImplementedError, AttributeError): - warnings.warn('Nonlinear measurement model used with no inverse ' - 'function available') - continue - else: - warnings.warn('Measurement model type not specified for all detections') - continue - - if isinstance(state, detection.Clutter): - # Plot clutter - plot_clutter.append((*state_vec[mapping], )) - - elif isinstance(state, detection.Detection): - # Plot detections - plot_detections.append((*state_vec[mapping], )) - else: - warnings.warn(f'Unknown type {type(state)}') - continue - - if plot_detections: - detection_array = np.array(plot_detections) - self.ax.scatter(detection_array[:, 0], - detection_array[:, 1], - detection_array[:, 2], **measurement_kwargs) - measurements_handle = Line2D([], [], linestyle='', **measurement_kwargs) - - # Generate legend items for measurements - self.legend_dict[measurements_label] = measurements_handle - - if plot_clutter: - clutter_array = np.array(plot_clutter) - self.ax.scatter(clutter_array[:, 0], - clutter_array[:, 1], - clutter_array[:, 2], color='y', marker='2') - clutter_handle = Line2D([], [], linestyle='', marker='2', color='y') - clutter_label = "Clutter" - - # Generate legend items for clutter - self.legend_dict[clutter_label] = clutter_handle - - # Generate legend - self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) - - def plot_tracks(self, tracks, mapping, uncertainty=False, track_label="Track", err_freq=1, - **kwargs): - """Plots track(s) - - Plots each track generated, generating a legend automatically. If ``uncertainty=True``, - uncertainty bars are plotted every :attr:`err_freq` measurements; the default - plots uncertainty bars at every track step. Tracks are plotted as solid - lines with point markers and default colors. Uncertainty bars are plotted - with a default color which is the same for all tracks. - - Users can change linestyle, color and marker using keyword arguments. Uncertainty bars - will also be plotted with the user defined colour and any changes will apply to all tracks. - - Parameters - ---------- - tracks : set of :class:`~.Track` - Set of tracks which will be plotted. If not a set, and instead a single - :class:`~.Track` type, the argument is modified to be a set to allow for iteration. - mapping: list - List of 3 items specifying the mapping of the x, y, and z position - components of the state space. - uncertainty : bool - If True, function plots uncertainty bars in x, y, and z. - particle : bool - If True, function plots particles. - track_label: str - Label to apply to all tracks for legend. - err_freq: int - Frequency of error bar plotting on tracks.
Default value is 1, meaning - error bars are plotted at every track step. - \\*\\*kwargs: dict - Additional arguments to be passed to plot function. Defaults are ``linestyle="-"``, - ``marker='.'`` and ``color=None``. - """ - - tracks_kwargs = dict(linestyle='-', marker=".", color=None) - tracks_kwargs.update(kwargs) - if not isinstance(tracks, set): - tracks = {tracks} # Make a set of length 1 - - # Plot tracks - track_colors = {} - for track in tracks: - line = self.ax.plot([state.state_vector[mapping[0]] for state in track], - [state.state_vector[mapping[1]] for state in track], - [state.state_vector[mapping[2]] for state in track], - **tracks_kwargs) - track_colors[track] = plt.getp(line[0], 'color') - - # Assuming a single track or all plotted as the same colour then the following will work. - # Otherwise will just render the final track colour. - tracks_kwargs['color'] = plt.getp(line[0], 'color') - - # Generate legend items for track - track_handle = Line2D([], [], linestyle=tracks_kwargs['linestyle'], - marker=tracks_kwargs['marker'], color=tracks_kwargs['color']) - self.legend_dict[track_label] = track_handle - - if uncertainty: - # Plot uncertainty ellipses - for track in tracks: - HH = np.eye(track.ndim)[mapping, :] # Get position mapping matrix - check = err_freq - for state in track: - if not check % err_freq: - w, v = np.linalg.eig(HH @ state.covar @ HH.T) - - xl = state.state_vector[mapping[0]] - yl = state.state_vector[mapping[1]] - zl = state.state_vector[mapping[2]] - - x_err = w[0] - y_err = w[1] - z_err = w[2] - - self.ax.plot3D([xl+x_err, xl-x_err], [yl, yl], [zl, zl], - marker="_", color=tracks_kwargs['color']) - self.ax.plot3D([xl, xl], [yl+y_err, yl-y_err], [zl, zl], - marker="_", color=tracks_kwargs['color']) - self.ax.plot3D([xl, xl], [yl, yl], [zl+z_err, zl-z_err], - marker="_", color=tracks_kwargs['color']) - check += 1 - self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) - - else: - self.ax.legend(handles=self.legend_dict.values(), labels=self.legend_dict.keys()) From f239056bc25457f821dc15463c3eb68c5cb01332 Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Mon, 17 Jan 2022 12:02:48 -0500 Subject: [PATCH 09/15] Delete .egg repository Delete cached python libraries --- .eggs/README.txt | 6 - .../packaging-21.3-py3.8.egg/EGG-INFO/LICENSE | 3 - .../EGG-INFO/LICENSE.APACHE | 177 - .../EGG-INFO/LICENSE.BSD | 23 - .../EGG-INFO/PKG-INFO | 453 -- .../packaging-21.3-py3.8.egg/EGG-INFO/RECORD | 19 - .eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL | 5 - .../EGG-INFO/requires.txt | 1 - .../EGG-INFO/top_level.txt | 1 - .../packaging/__about__.py | 26 - .../packaging/__init__.py | 25 - .../packaging/_manylinux.py | 301 - .../packaging/_musllinux.py | 136 - .../packaging/_structures.py | 61 - .../packaging/markers.py | 304 - .../packaging/py.typed | 0 .../packaging/requirements.py | 146 - .../packaging/specifiers.py | 802 --- .../packaging/tags.py | 487 -- .../packaging/utils.py | 136 - .../packaging/version.py | 504 -- .../EGG-INFO/LICENSE | 18 - .../EGG-INFO/PKG-INFO | 109 - .../pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD | 17 - .../pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL | 5 - .../EGG-INFO/requires.txt | 4 - .../EGG-INFO/top_level.txt | 1 - .../pyparsing/__init__.py | 328 - .../pyparsing/actions.py | 207 - .../pyparsing/common.py | 424 -- .../pyparsing/core.py | 5772 ----------------- .../pyparsing/diagram/__init__.py | 593 -- .../pyparsing/diagram/template.jinja2 | 26 - .../pyparsing/exceptions.py 
| 267 - .../pyparsing/helpers.py | 1059 --- .../pyparsing/results.py | 758 --- .../pyparsing/testing.py | 331 - .../pyparsing/unicode.py | 332 - .../pyparsing/util.py | 234 - .../EGG-INFO/LICENSE | 17 - .../EGG-INFO/PKG-INFO | 639 -- .../EGG-INFO/RECORD | 23 - .../EGG-INFO/WHEEL | 5 - .../EGG-INFO/entry_points.txt | 37 - .../EGG-INFO/requires.txt | 6 - .../EGG-INFO/top_level.txt | 1 - .../EGG-INFO/zip-safe | 1 - .../setuptools_scm/__init__.py | 212 - .../setuptools_scm/__main__.py | 15 - .../setuptools_scm/_version_cls.py | 49 - .../setuptools_scm/config.py | 212 - .../setuptools_scm/discover.py | 58 - .../setuptools_scm/file_finder.py | 70 - .../setuptools_scm/file_finder_git.py | 93 - .../setuptools_scm/file_finder_hg.py | 49 - .../setuptools_scm/git.py | 220 - .../setuptools_scm/hacks.py | 40 - .../setuptools_scm/hg.py | 169 - .../setuptools_scm/hg_git.py | 133 - .../setuptools_scm/integration.py | 94 - .../setuptools_scm/scm_workdir.py | 15 - .../setuptools_scm/utils.py | 154 - .../setuptools_scm/version.py | 460 -- .../EGG-INFO/LICENSE | 17 - .../EGG-INFO/PKG-INFO | 44 - .../EGG-INFO/RECORD | 7 - .../EGG-INFO/WHEEL | 6 - .../EGG-INFO/entry_points.txt | 6 - .../EGG-INFO/top_level.txt | 1 - .../setuptools_scm_git_archive/__init__.py | 21 - .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE | 21 - .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO | 208 - .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD | 9 - .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL | 4 - .eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py | 9 - .eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py | 663 -- .eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py | 101 - .eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py | 6 - .eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed | 1 - 79 files changed, 17997 deletions(-) delete mode 100644 .eggs/README.txt delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt delete mode 100644 .eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/__about__.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/__init__.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/_structures.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/markers.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/py.typed delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/requirements.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/tags.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/utils.py delete mode 100644 .eggs/packaging-21.3-py3.8.egg/packaging/version.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL delete mode 100644 
.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py delete mode 100644 .eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py delete mode 100644 .eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py delete mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE delete mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO delete mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD delete mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL delete mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt delete mode 100644 .eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt delete mode 100644 
.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py delete mode 100644 .eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed diff --git a/.eggs/README.txt b/.eggs/README.txt deleted file mode 100644 index 5d0166882..000000000 --- a/.eggs/README.txt +++ /dev/null @@ -1,6 +0,0 @@ -This directory contains eggs that were downloaded by setuptools to build, test, and run plug-ins. - -This directory caches those eggs to prevent repeated downloads. - -However, it is safe to delete this directory. - diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE deleted file mode 100644 index 6f62d44e4..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE +++ /dev/null @@ -1,3 +0,0 @@ -This software is made available under the terms of *either* of the licenses -found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made -under the terms of *both* these licenses. diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE deleted file mode 100644 index f433b1a53..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.APACHE +++ /dev/null @@ -1,177 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD deleted file mode 100644 index 42ce7b75c..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/LICENSE.BSD +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) Donald Stufft and individual contributors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
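For context on the removals above and below: per the deleted .eggs/README.txt, that directory is only a cache of build-time dependencies that setuptools downloads on demand, so deleting it loses nothing. As a minimal sketch (the project name is hypothetical; setuptools_scm and setuptools_scm_git_archive are among the cached eggs this patch removes), a setup.py like the following is what causes setuptools to populate .eggs/ in the first place::

    from setuptools import setup

    setup(
        name="example-project",  # hypothetical
        version="0.1",
        # Build-time-only dependencies: when one is missing at build time,
        # setuptools fetches it as an egg into .eggs/ rather than
        # installing it into the environment.
        setup_requires=["setuptools_scm", "setuptools_scm_git_archive"],
    )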
diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO deleted file mode 100644 index 358ace536..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/PKG-INFO +++ /dev/null @@ -1,453 +0,0 @@ -Metadata-Version: 2.1 -Name: packaging -Version: 21.3 -Summary: Core utilities for Python packages -Home-page: https://github.com/pypa/packaging -Author: Donald Stufft and individual contributors -Author-email: donald@stufft.io -License: BSD-2-Clause or Apache-2.0 -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: Apache Software License -Classifier: License :: OSI Approved :: BSD License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Requires-Python: >=3.6 -Description-Content-Type: text/x-rst -License-File: LICENSE -License-File: LICENSE.APACHE -License-File: LICENSE.BSD -Requires-Dist: pyparsing (!=3.0.5,>=2.0.2) - -packaging -========= - -.. start-intro - -Reusable core utilities for various Python Packaging -`interoperability specifications `_. - -This library provides utilities that implement the interoperability -specifications which have clearly one correct behaviour (eg: :pep:`440`) -or benefit greatly from having a single shared implementation (eg: :pep:`425`). - -.. end-intro - -The ``packaging`` project includes the following: version handling, specifiers, -markers, requirements, tags, utilities. - -Documentation -------------- - -The `documentation`_ provides information and the API for the following: - -- Version Handling -- Specifiers -- Markers -- Requirements -- Tags -- Utilities - -Installation ------------- - -Use ``pip`` to install these utilities:: - - pip install packaging - -Discussion ----------- - -If you run into bugs, you can file them in our `issue tracker`_. - -You can also join ``#pypa`` on Freenode to ask questions or get involved. - - -.. _`documentation`: https://packaging.pypa.io/ -.. _`issue tracker`: https://github.com/pypa/packaging/issues - - -Code of Conduct ---------------- - -Everyone interacting in the packaging project's codebases, issue trackers, chat -rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_. - -.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md - -Contributing ------------- - -The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as -well as how to report a potential security issue. The documentation for this -project also covers information about `project development`_ and `security`_. - -.. _`project development`: https://packaging.pypa.io/en/latest/development/ -.. _`security`: https://packaging.pypa.io/en/latest/security/ - -Project History ---------------- - -Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for -recent changes and project history. - -.. 
_`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/ - -Changelog ---------- - -21.3 - 2021-11-17 -~~~~~~~~~~~~~~~~~ - -* Add a ``pp3-none-any`` tag (`#311 `__) -* Replace the blank pyparsing 3 exclusion with a 3.0.5 exclusion (`#481 `__, `#486 `__) -* Fix a spelling mistake (`#479 `__) - -21.2 - 2021-10-29 -~~~~~~~~~~~~~~~~~ - -* Update documentation entry for 21.1. - -21.1 - 2021-10-29 -~~~~~~~~~~~~~~~~~ - -* Update pin to pyparsing to exclude 3.0.0. - -21.0 - 2021-07-03 -~~~~~~~~~~~~~~~~~ - -* PEP 656: musllinux support (`#411 `__) -* Drop support for Python 2.7, Python 3.4 and Python 3.5. -* Replace distutils usage with sysconfig (`#396 `__) -* Add support for zip files in ``parse_sdist_filename`` (`#429 `__) -* Use cached ``_hash`` attribute to short-circuit tag equality comparisons (`#417 `__) -* Specify the default value for the ``specifier`` argument to ``SpecifierSet`` (`#437 `__) -* Proper keyword-only "warn" argument in packaging.tags (`#403 `__) -* Correctly remove prerelease suffixes from ~= check (`#366 `__) -* Fix type hints for ``Version.post`` and ``Version.dev`` (`#393 `__) -* Use typing alias ``UnparsedVersion`` (`#398 `__) -* Improve type inference for ``packaging.specifiers.filter()`` (`#430 `__) -* Tighten the return type of ``canonicalize_version()`` (`#402 `__) - -20.9 - 2021-01-29 -~~~~~~~~~~~~~~~~~ - -* Run `isort `_ over the code base (`#377 `__) -* Add support for the ``macosx_10_*_universal2`` platform tags (`#379 `__) -* Introduce ``packaging.utils.parse_wheel_filename()`` and ``parse_sdist_filename()`` - (`#387 `__ and `#389 `__) - -20.8 - 2020-12-11 -~~~~~~~~~~~~~~~~~ - -* Revert back to setuptools for compatibility purposes for some Linux distros (`#363 `__) -* Do not insert an underscore in wheel tags when the interpreter version number - is more than 2 digits (`#372 `__) - -20.7 - 2020-11-28 -~~~~~~~~~~~~~~~~~ - -No unreleased changes. - -20.6 - 2020-11-28 -~~~~~~~~~~~~~~~~~ - -.. note:: This release was subsequently yanked, and these changes were included in 20.7. - -* Fix flit configuration, to include LICENSE files (`#357 `__) -* Make `intel` a recognized CPU architecture for the `universal` macOS platform tag (`#361 `__) -* Add some missing type hints to `packaging.requirements` (issue:`350`) - -20.5 - 2020-11-27 -~~~~~~~~~~~~~~~~~ - -* Officially support Python 3.9 (`#343 `__) -* Deprecate the ``LegacyVersion`` and ``LegacySpecifier`` classes (`#321 `__) -* Handle ``OSError`` on non-dynamic executables when attempting to resolve - the glibc version string. - -20.4 - 2020-05-19 -~~~~~~~~~~~~~~~~~ - -* Canonicalize version before comparing specifiers. (`#282 `__) -* Change type hint for ``canonicalize_name`` to return - ``packaging.utils.NormalizedName``. - This enables the use of static typing tools (like mypy) to detect mixing of - normalized and un-normalized names. - -20.3 - 2020-03-05 -~~~~~~~~~~~~~~~~~ - -* Fix changelog for 20.2. - -20.2 - 2020-03-05 -~~~~~~~~~~~~~~~~~ - -* Fix a bug that caused a 32-bit OS that runs on a 64-bit ARM CPU (e.g. ARM-v8, - aarch64), to report the wrong bitness. - -20.1 - 2020-01-24 -~~~~~~~~~~~~~~~~~~~ - -* Fix a bug caused by reuse of an exhausted iterator. 
(`#257 `__) - -20.0 - 2020-01-06 -~~~~~~~~~~~~~~~~~ - -* Add type hints (`#191 `__) - -* Add proper trove classifiers for PyPy support (`#198 `__) - -* Scale back depending on ``ctypes`` for manylinux support detection (`#171 `__) - -* Use ``sys.implementation.name`` where appropriate for ``packaging.tags`` (`#193 `__) - -* Expand upon the API provided by ``packaging.tags``: ``interpreter_name()``, ``mac_platforms()``, ``compatible_tags()``, ``cpython_tags()``, ``generic_tags()`` (`#187 `__) - -* Officially support Python 3.8 (`#232 `__) - -* Add ``major``, ``minor``, and ``micro`` aliases to ``packaging.version.Version`` (`#226 `__) - -* Properly mark ``packaging`` has being fully typed by adding a `py.typed` file (`#226 `__) - -19.2 - 2019-09-18 -~~~~~~~~~~~~~~~~~ - -* Remove dependency on ``attrs`` (`#178 `__, `#179 `__) - -* Use appropriate fallbacks for CPython ABI tag (`#181 `__, `#185 `__) - -* Add manylinux2014 support (`#186 `__) - -* Improve ABI detection (`#181 `__) - -* Properly handle debug wheels for Python 3.8 (`#172 `__) - -* Improve detection of debug builds on Windows (`#194 `__) - -19.1 - 2019-07-30 -~~~~~~~~~~~~~~~~~ - -* Add the ``packaging.tags`` module. (`#156 `__) - -* Correctly handle two-digit versions in ``python_version`` (`#119 `__) - - -19.0 - 2019-01-20 -~~~~~~~~~~~~~~~~~ - -* Fix string representation of PEP 508 direct URL requirements with markers. - -* Better handling of file URLs - - This allows for using ``file:///absolute/path``, which was previously - prevented due to the missing ``netloc``. - - This allows for all file URLs that ``urlunparse`` turns back into the - original URL to be valid. - - -18.0 - 2018-09-26 -~~~~~~~~~~~~~~~~~ - -* Improve error messages when invalid requirements are given. (`#129 `__) - - -17.1 - 2017-02-28 -~~~~~~~~~~~~~~~~~ - -* Fix ``utils.canonicalize_version`` when supplying non PEP 440 versions. - - -17.0 - 2017-02-28 -~~~~~~~~~~~~~~~~~ - -* Drop support for python 2.6, 3.2, and 3.3. - -* Define minimal pyparsing version to 2.0.2 (`#91 `__). - -* Add ``epoch``, ``release``, ``pre``, ``dev``, and ``post`` attributes to - ``Version`` and ``LegacyVersion`` (`#34 `__). - -* Add ``Version().is_devrelease`` and ``LegacyVersion().is_devrelease`` to - make it easy to determine if a release is a development release. - -* Add ``utils.canonicalize_version`` to canonicalize version strings or - ``Version`` instances (`#121 `__). - - -16.8 - 2016-10-29 -~~~~~~~~~~~~~~~~~ - -* Fix markers that utilize ``in`` so that they render correctly. - -* Fix an erroneous test on Python RC releases. - - -16.7 - 2016-04-23 -~~~~~~~~~~~~~~~~~ - -* Add support for the deprecated ``python_implementation`` marker which was - an undocumented setuptools marker in addition to the newer markers. - - -16.6 - 2016-03-29 -~~~~~~~~~~~~~~~~~ - -* Add support for the deprecated, PEP 345 environment markers in addition to - the newer markers. - - -16.5 - 2016-02-26 -~~~~~~~~~~~~~~~~~ - -* Fix a regression in parsing requirements with whitespaces between the comma - separators. - - -16.4 - 2016-02-22 -~~~~~~~~~~~~~~~~~ - -* Fix a regression in parsing requirements like ``foo (==4)``. - - -16.3 - 2016-02-21 -~~~~~~~~~~~~~~~~~ - -* Fix a bug where ``packaging.requirements:Requirement`` was overly strict when - matching legacy requirements. - - -16.2 - 2016-02-09 -~~~~~~~~~~~~~~~~~ - -* Add a function that implements the name canonicalization from PEP 503. - - -16.1 - 2016-02-07 -~~~~~~~~~~~~~~~~~ - -* Implement requirement specifiers from PEP 508. 
- - -16.0 - 2016-01-19 -~~~~~~~~~~~~~~~~~ - -* Relicense so that packaging is available under *either* the Apache License, - Version 2.0 or a 2 Clause BSD license. - -* Support installation of packaging when only distutils is available. - -* Fix ``==`` comparison when there is a prefix and a local version in play. - (`#41 `__). - -* Implement environment markers from PEP 508. - - -15.3 - 2015-08-01 -~~~~~~~~~~~~~~~~~ - -* Normalize post-release spellings for rev/r prefixes. `#35 `__ - - -15.2 - 2015-05-13 -~~~~~~~~~~~~~~~~~ - -* Fix an error where the arbitrary specifier (``===``) was not correctly - allowing pre-releases when it was being used. - -* Expose the specifier and version parts through properties on the - ``Specifier`` classes. - -* Allow iterating over the ``SpecifierSet`` to get access to all of the - ``Specifier`` instances. - -* Allow testing if a version is contained within a specifier via the ``in`` - operator. - - -15.1 - 2015-04-13 -~~~~~~~~~~~~~~~~~ - -* Fix a logic error that was causing inconsistent answers about whether or not - a pre-release was contained within a ``SpecifierSet`` or not. - - -15.0 - 2015-01-02 -~~~~~~~~~~~~~~~~~ - -* Add ``Version().is_postrelease`` and ``LegacyVersion().is_postrelease`` to - make it easy to determine if a release is a post release. - -* Add ``Version().base_version`` and ``LegacyVersion().base_version`` to make - it easy to get the public version without any pre or post release markers. - -* Support the update to PEP 440 which removed the implied ``!=V.*`` when using - either the ``>V`` or ``<V`` operators. - - -14.3 - 2014-11-19 -~~~~~~~~~~~~~~~~~ - -* **BACKWARDS INCOMPATIBLE** Refactor specifier support so that it can sanely - handle legacy specifiers as well as PEP 440 specifiers. - -* **BACKWARDS INCOMPATIBLE** Move the specifier support out of - ``packaging.version`` into ``packaging.specifiers``. - - -14.2 - 2014-09-10 -~~~~~~~~~~~~~~~~~ - -* Add prerelease support to ``Specifier``. -* Remove the ability to do ``item in Specifier()`` and replace it with - ``Specifier().contains(item)`` in order to allow flags that signal if a - prerelease should be accepted or not. -* Add a method ``Specifier().filter()`` which will take an iterable and return - an iterable with items that do not match the specifier filtered out. - - -14.1 - 2014-09-08 -~~~~~~~~~~~~~~~~~ - -* Allow ``LegacyVersion`` and ``Version`` to be sorted together. -* Add ``packaging.version.parse()`` to enable easily parsing a version string - as either a ``Version`` or a ``LegacyVersion`` depending on its PEP 440 - validity. - - -14.0 - 2014-09-05 -~~~~~~~~~~~~~~~~~ - -* Initial release. - - -.. 
_`master`: https://github.com/pypa/packaging/ - - diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD deleted file mode 100644 index 870a8eb17..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/RECORD +++ /dev/null @@ -1,19 +0,0 @@ -packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661 -packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 -packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 -packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378 -packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 -packaging/markers.py,sha256=Fygi3_eZnjQ-3VJizW5AhI5wvo0Hb6RMk4DidsKpOC0,8475 -packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -packaging/requirements.py,sha256=rjaGRCMepZS1mlYMjJ5Qh6rfq3gtsCRQUQmftGZ_bu8,4664 -packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110 -packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699 -packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 -packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 -packaging-21.3.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 -packaging-21.3.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 -packaging-21.3.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 -packaging-21.3.dist-info/METADATA,sha256=KuKIy6qDLP3svIt6ejCbxBDhvq11ebkgUN55MeyKFyc,15147 -packaging-21.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 -packaging-21.3.dist-info/top_level.txt,sha256=zFdHrhWnPslzsiP455HutQsqPB6v0KCtNUMtUtrefDw,10 -packaging-21.3.dist-info/RECORD,, diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL deleted file mode 100644 index 5bad85fdc..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.0) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt deleted file mode 100644 index f6e4a46ef..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/requires.txt +++ /dev/null @@ -1 +0,0 @@ -pyparsing!=3.0.5,>=2.0.2 diff --git a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt deleted file mode 100644 index 748809f75..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/EGG-INFO/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -packaging diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/__about__.py b/.eggs/packaging-21.3-py3.8.egg/packaging/__about__.py deleted file mode 100644 index 3551bc2d2..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/__about__.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
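# Editorial aside (not part of the deleted file): the dunder metadata defined
# below is what consumers of this egg saw at runtime, e.g., in a hypothetical
# session:
#
#     >>> import packaging
#     >>> packaging.__version__
#     '21.3'
#
# The __init__.py hunk that follows simply re-exports these names.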
- -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "21.3" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/__init__.py b/.eggs/packaging-21.3-py3.8.egg/packaging/__init__.py deleted file mode 100644 index 3c50c5dcf..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -from .__about__ import ( - __author__, - __copyright__, - __email__, - __license__, - __summary__, - __title__, - __uri__, - __version__, -) - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py b/.eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py deleted file mode 100644 index 4c379aa6f..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/_manylinux.py +++ /dev/null @@ -1,301 +0,0 @@ -import collections -import functools -import os -import re -import struct -import sys -import warnings -from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: Tuple[int, ...] 
= struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> Optional[_ELFFileHeader]: - try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header - - -def _is_linux_armhf() -> bool: - # hard-float ABI can be detected from the ELF header of the running - # process - # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result - - -def _have_compatible_abi(arch: str) -> bool: - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} - - -# If glibc ever changes its major version, we need to know what the last -# minor version was, so we can build the complete list of all versions. -# For now, guess what the highest minor version might be, assume it will -# be 50 for testing. Once this actually happens, update the dictionary -# with the actual value. -_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) - - -class _GLibCVersion(NamedTuple): - major: int - minor: int - - -def _glibc_version_string_confstr() -> Optional[str]: - """ - Primary implementation of glibc_version_string using os.confstr. - """ - # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely - # to be broken or missing. This strategy is used in the standard library - # platform module. 
- # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 - try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr("CS_GNU_LIBC_VERSION") - assert version_string is not None - _, version = version_string.split() - except (AssertionError, AttributeError, OSError, ValueError): - # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... - return None - return version - - -def _glibc_version_string_ctypes() -> Optional[str]: - """ - Fallback implementation of glibc_version_string using ctypes. - """ - try: - import ctypes - except ImportError: - return None - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. - # - # We must also handle the special case where the executable is not a - # dynamically linked executable. This can occur when using musl libc, - # for example. In this situation, dlopen() will error, leading to an - # OSError. Interestingly, at least in the case of musl, there is no - # errno set on the OSError. The single string argument used to construct - # OSError comes from libc itself and is therefore not portable to - # hard code here. In any case, failure to call dlopen() means we - # can proceed, so we bail on our attempt. - try: - process_namespace = ctypes.CDLL(None) - except OSError: - return None - - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str: str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -def _glibc_version_string() -> Optional[str]: - """Returns glibc version string, or None if not using glibc.""" - return _glibc_version_string_confstr() or _glibc_version_string_ctypes() - - -def _parse_glibc_version(version_str: str) -> Tuple[int, int]: - """Parse glibc version. - - We use a regexp instead of str.split because we want to discard any - random junk that might come after the minor version -- this might happen - in patched/forked versions of glibc (e.g. Linaro's version of glibc - uses version strings like "2.20-2014.11"). See gh-3588. - """ - m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) - if not m: - warnings.warn( - "Expected glibc version with 2 components major.minor," - " got: %s" % version_str, - RuntimeWarning, - ) - return -1, -1 - return int(m.group("major")), int(m.group("minor")) - - -@functools.lru_cache() -def _get_glibc_version() -> Tuple[int, int]: - version_str = _glibc_version_string() - if version_str is None: - return (-1, -1) - return _parse_glibc_version(version_str) - - -# From PEP 513, PEP 600 -def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: - sys_glibc = _get_glibc_version() - if sys_glibc < version: - return False - # Check for presence of _manylinux module. 
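# Editorial aside (not part of the deleted file): the import and hasattr()
# checks below implement the PEP 600 escape hatch, where a distributor may
# place a `_manylinux` module on sys.path to override tag detection. A
# hypothetical override module:
#
#     # _manylinux.py
#     def manylinux_compatible(major, minor, arch):
#         # e.g., refuse to claim compatibility beyond glibc 2.17
#         return (major, minor) <= (2, 17)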
- try: - import _manylinux # noqa - except ImportError: - return True - if hasattr(_manylinux, "manylinux_compatible"): - result = _manylinux.manylinux_compatible(version[0], version[1], arch) - if result is not None: - return bool(result) - return True - if version == _GLibCVersion(2, 5): - if hasattr(_manylinux, "manylinux1_compatible"): - return bool(_manylinux.manylinux1_compatible) - if version == _GLibCVersion(2, 12): - if hasattr(_manylinux, "manylinux2010_compatible"): - return bool(_manylinux.manylinux2010_compatible) - if version == _GLibCVersion(2, 17): - if hasattr(_manylinux, "manylinux2014_compatible"): - return bool(_manylinux.manylinux2014_compatible) - return True - - -_LEGACY_MANYLINUX_MAP = { - # CentOS 7 w/ glibc 2.17 (PEP 599) - (2, 17): "manylinux2014", - # CentOS 6 w/ glibc 2.12 (PEP 571) - (2, 12): "manylinux2010", - # CentOS 5 w/ glibc 2.5 (PEP 513) - (2, 5): "manylinux1", -} - - -def platform_tags(linux: str, arch: str) -> Iterator[str]: - if not _have_compatible_abi(arch): - return - # Oldest glibc to be supported regardless of architecture is (2, 17). - too_old_glibc2 = _GLibCVersion(2, 16) - if arch in {"x86_64", "i686"}: - # On x86/i686 also oldest glibc to be supported is (2, 5). - too_old_glibc2 = _GLibCVersion(2, 4) - current_glibc = _GLibCVersion(*_get_glibc_version()) - glibc_max_list = [current_glibc] - # We can assume compatibility across glibc major versions. - # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 - # - # Build a list of maximum glibc versions so that we can - # output the canonical list of all glibc from current_glibc - # down to too_old_glibc2, including all intermediary versions. - for glibc_major in range(current_glibc.major - 1, 1, -1): - glibc_minor = _LAST_GLIBC_MINOR[glibc_major] - glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. - if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py b/.eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py deleted file mode 100644 index 8ac3059ba..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/_musllinux.py +++ /dev/null @@ -1,136 +0,0 @@ -"""PEP 656 support. - -This module implements logic to detect if the currently running Python is -linked against musl, and what musl version is used. -""" - -import contextlib -import functools -import operator -import os -import re -import struct -import subprocess -import sys -from typing import IO, Iterator, NamedTuple, Optional, Tuple - - -def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: - return struct.unpack(fmt, f.read(struct.calcsize(fmt))) - - -def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: - """Detect musl libc location by parsing the Python executable. 
- - Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca - ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html - """ - f.seek(0) - try: - ident = _read_unpacked(f, "16B") - except struct.error: - return None - if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. - return None - f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. - - try: - # e_fmt: Format for program header. - # p_fmt: Format for section header. - # p_idx: Indexes to find p_type, p_offset, and p_filesz. - e_fmt, p_fmt, p_idx = { - 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. - 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. - }[ident[4]] - except KeyError: - return None - else: - p_get = operator.itemgetter(*p_idx) - - # Find the interpreter section and return its content. - try: - _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) - except struct.error: - return None - for i in range(e_phnum + 1): - f.seek(e_phoff + e_phentsize * i) - try: - p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) - except struct.error: - return None - if p_type != 3: # Not PT_INTERP. - continue - f.seek(p_offset) - interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") - if "musl" not in interpreter: - return None - return interpreter - return None - - -class _MuslVersion(NamedTuple): - major: int - minor: int - - -def _parse_musl_version(output: str) -> Optional[_MuslVersion]: - lines = [n for n in (n.strip() for n in output.splitlines()) if n] - if len(lines) < 2 or lines[0][:4] != "musl": - return None - m = re.match(r"Version (\d+)\.(\d+)", lines[1]) - if not m: - return None - return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) - - -@functools.lru_cache() -def _get_musl_version(executable: str) -> Optional[_MuslVersion]: - """Detect currently-running musl runtime version. - - This is done by checking the specified executable's dynamic linking - information, and invoking the loader to parse its output for a version - string. If the loader is musl, the output would be something like:: - - musl libc (x86_64) - Version 1.2.2 - Dynamic Program Loader - """ - with contextlib.ExitStack() as stack: - try: - f = stack.enter_context(open(executable, "rb")) - except OSError: - return None - ld = _parse_ld_musl_from_elf(f) - if not ld: - return None - proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) - return _parse_musl_version(proc.stderr) - - -def platform_tags(arch: str) -> Iterator[str]: - """Generate musllinux tags compatible to the current platform. - - :param arch: Should be the part of platform tag after the ``linux_`` - prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a - prerequisite for the current platform to be musllinux-compatible. - - :returns: An iterator of compatible musllinux tags. - """ - sys_musl = _get_musl_version(sys.executable) - if sys_musl is None: # Python not dynamically linked against musl. 
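# Editorial aside (not part of the deleted file): on a hypothetical musl 1.2
# runtime with arch "x86_64", the loop below yields, newest ABI first:
#     musllinux_1_2_x86_64, musllinux_1_1_x86_64, musllinux_1_0_x86_64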
- return - for minor in range(sys_musl.minor, -1, -1): - yield f"musllinux_{sys_musl.major}_{minor}_{arch}" - - -if __name__ == "__main__": # pragma: no cover - import sysconfig - - plat = sysconfig.get_platform() - assert plat.startswith("linux-"), "not linux" - - print("plat:", plat) - print("musl:", _get_musl_version(sys.executable)) - print("tags:", end=" ") - for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): - print(t, end="\n ") diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/_structures.py b/.eggs/packaging-21.3-py3.8.egg/packaging/_structures.py deleted file mode 100644 index 90a6465f9..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/_structures.py +++ /dev/null @@ -1,61 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - - -class InfinityType: - def __repr__(self) -> str: - return "Infinity" - - def __hash__(self) -> int: - return hash(repr(self)) - - def __lt__(self, other: object) -> bool: - return False - - def __le__(self, other: object) -> bool: - return False - - def __eq__(self, other: object) -> bool: - return isinstance(other, self.__class__) - - def __gt__(self, other: object) -> bool: - return True - - def __ge__(self, other: object) -> bool: - return True - - def __neg__(self: object) -> "NegativeInfinityType": - return NegativeInfinity - - -Infinity = InfinityType() - - -class NegativeInfinityType: - def __repr__(self) -> str: - return "-Infinity" - - def __hash__(self) -> int: - return hash(repr(self)) - - def __lt__(self, other: object) -> bool: - return True - - def __le__(self, other: object) -> bool: - return True - - def __eq__(self, other: object) -> bool: - return isinstance(other, self.__class__) - - def __gt__(self, other: object) -> bool: - return False - - def __ge__(self, other: object) -> bool: - return False - - def __neg__(self: object) -> InfinityType: - return Infinity - - -NegativeInfinity = NegativeInfinityType() diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/markers.py b/.eggs/packaging-21.3-py3.8.egg/packaging/markers.py deleted file mode 100644 index cb640e8f9..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/markers.py +++ /dev/null @@ -1,304 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import operator -import os -import platform -import sys -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -from pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, -) - -from .specifiers import InvalidSpecifier, Specifier - -__all__ = [ - "InvalidMarker", - "UndefinedComparison", - "UndefinedEnvironmentName", - "Marker", - "default_environment", -] - -Operator = Callable[[str, str], bool] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. 
- """ - - -class Node: - def __init__(self, value: Any) -> None: - self.value = value - - def __str__(self) -> str: - return str(self.value) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker( - marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True -) -> str: - - assert isinstance(marker, (list, tuple, str)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. 
- if ( - isinstance(marker, list) - and len(marker) == 1 - and isinstance(marker[0], (list, tuple)) - ): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators: Dict[str, Operator] = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs: str, op: Op, rhs: str) -> bool: - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs) - - oper: Optional[Operator] = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") - - return oper(lhs, rhs) - - -class Undefined: - pass - - -_undefined = Undefined() - - -def _get_env(environment: Dict[str, str], name: str) -> str: - value: Union[str, Undefined] = environment.get(name, _undefined) - - if isinstance(value, Undefined): - raise UndefinedEnvironmentName( - f"{name!r} does not exist in evaluation environment." - ) - - return value - - -def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: - groups: List[List[bool]] = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, str)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) - rhs_value = rhs.value - else: - lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) - - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info: "sys._version_info") -> str: - version = "{0.major}.{0.minor}.{0.micro}".format(info) - kind = info.releaselevel - if kind != "final": - version += kind[0] + str(info.serial) - return version - - -def default_environment() -> Dict[str, str]: - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": ".".join(platform.python_version_tuple()[:2]), - "sys_platform": sys.platform, - } - - -class Marker: - def __init__(self, marker: str) -> None: - try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - raise InvalidMarker( - f"Invalid marker: {marker!r}, parse error at " - f"{marker[e.loc : e.loc + 8]!r}" - ) - - def __str__(self) -> str: - return _format_marker(self._markers) - - def __repr__(self) -> str: - return f"" - - def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. 
environment is an optional argument to override all or - part of the determined environment. - - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/py.typed b/.eggs/packaging-21.3-py3.8.egg/packaging/py.typed deleted file mode 100644 index e69de29bb..000000000 diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/requirements.py b/.eggs/packaging-21.3-py3.8.egg/packaging/requirements.py deleted file mode 100644 index 53f9a3aa4..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/requirements.py +++ /dev/null @@ -1,146 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -import string -import urllib.parse -from typing import List, Optional as TOptional, Set - -from pyparsing import ( # noqa - Combine, - Literal as L, - Optional, - ParseException, - Regex, - Word, - ZeroOrMore, - originalTextFor, - stringEnd, - stringStart, -) - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. - """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - -class Requirement: - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. Raises InvalidRequirement on a badly-formed requirement - string. 
- """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? Do we need to test against the _name_ of - # the thing as well as the version? What about the markers? - # TODO: Can we normalize the name and extra name? - - def __init__(self, requirement_string: str) -> None: - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' - ) - - self.name: str = req.name - if req.url: - parsed_url = urllib.parse.urlparse(req.url) - if parsed_url.scheme == "file": - if urllib.parse.urlunparse(parsed_url) != req.url: - raise InvalidRequirement("Invalid URL given") - elif not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc - ): - raise InvalidRequirement(f"Invalid URL: {req.url}") - self.url: TOptional[str] = req.url - else: - self.url = None - self.extras: Set[str] = set(req.extras.asList() if req.extras else []) - self.specifier: SpecifierSet = SpecifierSet(req.specifier) - self.marker: TOptional[Marker] = req.marker if req.marker else None - - def __str__(self) -> str: - parts: List[str] = [self.name] - - if self.extras: - formatted_extras = ",".join(sorted(self.extras)) - parts.append(f"[{formatted_extras}]") - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append(f"@ {self.url}") - if self.marker: - parts.append(" ") - - if self.marker: - parts.append(f"; {self.marker}") - - return "".join(parts) - - def __repr__(self) -> str: - return f"" diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py b/.eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py deleted file mode 100644 index 0e218a6f9..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/specifiers.py +++ /dev/null @@ -1,802 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import abc -import functools -import itertools -import re -import warnings -from typing import ( - Callable, - Dict, - Iterable, - Iterator, - List, - Optional, - Pattern, - Set, - Tuple, - TypeVar, - Union, -) - -from .utils import canonicalize_version -from .version import LegacyVersion, Version, parse - -ParsedVersion = Union[Version, LegacyVersion] -UnparsedVersion = Union[Version, LegacyVersion, str] -VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) -CallableOperator = Callable[[ParsedVersion, str], bool] - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(metaclass=abc.ABCMeta): - @abc.abstractmethod - def __str__(self) -> str: - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self) -> int: - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. - """ - - @abc.abstractproperty - def prereleases(self) -> Optional[bool]: - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. 
- """ - - @prereleases.setter - def prereleases(self, value: bool) -> None: - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @abc.abstractmethod - def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. - """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators: Dict[str, str] = {} - _regex: Pattern[str] - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier(f"Invalid specifier: '{spec}'") - - self._spec: Tuple[str, str] = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return f"<{self.__class__.__name__}({str(self)!r}{pre})>" - - def __str__(self) -> str: - return "{}{}".format(*self._spec) - - @property - def _canonical_spec(self) -> Tuple[str, str]: - return self._spec[0], canonicalize_version(self._spec[1]) - - def __hash__(self) -> int: - return hash(self._canonical_spec) - - def __eq__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._canonical_spec == other._canonical_spec - - def _get_operator(self, op: str) -> CallableOperator: - operator_callable: CallableOperator = getattr( - self, f"_compare_{self._operators[op]}" - ) - return operator_callable - - def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self) -> str: - return self._spec[0] - - @property - def version(self) -> str: - return self._spec[1] - - @property - def prereleases(self) -> Optional[bool]: - return self._prereleases - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: str) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Determine if prereleases are to be allowed or not. - if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - normalized_item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if normalized_item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. 
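The ``contains`` logic above rejects pre-releases unless they are enabled
explicitly; for illustration (assuming this ``packaging`` release is
importable):

.. code:: python

    from packaging.specifiers import Specifier

    spec = Specifier(">=1.0")
    print(spec.contains("1.5"))                       # True
    print(spec.contains("2.0rc1"))                    # False: pre-release
    print(spec.contains("2.0rc1", prereleases=True))  # True once opted in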
- operator_callable: CallableOperator = self._get_operator(self.operator) - return operator_callable(normalized_item, self.version) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later in case nothing - # else matches this specifier. - if parsed_version.is_prerelease and not ( - prereleases or self.prereleases - ): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the beginning. - else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = r""" - (?P(==|!=|<=|>=|<|>)) - \s* - (?P - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. - ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - super().__init__(spec, prereleases) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal( - self, prospective: LegacyVersion, spec: str - ) -> bool: - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective > self._coerce_version(spec) - - -def _require_version_compare( - fn: Callable[["Specifier", ParsedVersion, str], bool] -) -> Callable[["Specifier", ParsedVersion, str], bool]: - @functools.wraps(fn) - def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: - if not isinstance(prospective, Version): - return False - return 
fn(self, prospective, spec) - - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = r""" - (?P(~=|==|!=|<=|>=|<|>|===)) - (?P - (?: - # The identity operators allow for an escape hatch that will - # do an exact string match of the version you wish to install. - # This will not be parsed by PEP 440 and we cannot determine - # any semantic meaning from it. This operator is discouraged - # but included entirely as an escape hatch. - (?<====) # Only match for the identity operator - \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. - ) - | - (?: - # The (non)equality operators allow for wild card and local - # versions to be specified so we have to define these two - # operators separately to enable that. - (?<===|!=) # Only match for equals and not equals - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. - (?: - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* - )? - ) - | - (?: - # The compatible operator requires at least two digits in the - # release segment. - (?<=~=) # Only match for the compatible operator - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - | - (?: - # All other operators only allow a sub set of what the - # (non)equality operators do. Specifically they do not allow - # local versions to be specified nor do they allow the prefix - # matching wild cards. - (?=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - "===": "arbitrary", - } - - @_require_version_compare - def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: - - # Compatible releases have an equivalent combination of >= and ==. That - # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to - # implement this in terms of the other specifiers instead of - # implementing it ourselves. The only thing we need to do is construct - # the other specifiers. - - # We want everything but the last item in the version, but we want to - # ignore suffix segments. - prefix = ".".join( - list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] - ) - - # Add the prefix notation to the end of our string - prefix += ".*" - - return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( - prospective, prefix - ) - - @_require_version_compare - def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: - - # We need special logic to handle prefix matching - if spec.endswith(".*"): - # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment. 
- split_spec = _version_split(spec[:-2]) # Remove the trailing .* - - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. - split_prospective = _version_split(str(prospective)) - - # Shorten the prospective version to be the same length as the spec - # so that we can determine if the specifier is a prefix of the - # prospective version or not. - shortened_prospective = split_prospective[: len(split_spec)] - - # Pad out our two sides with zeros so that they both equal the same - # length. - padded_spec, padded_prospective = _pad_version( - split_spec, shortened_prospective - ) - - return padded_prospective == padded_spec - else: - # Convert our spec string into a Version - spec_version = Version(spec) - - # If the specifier does not have a local segment, then we want to - # act as if the prospective version also does not have a local - # segment. - if not spec_version.local: - prospective = Version(prospective.public) - - return prospective == spec_version - - @_require_version_compare - def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: - return not self._compare_equal(prospective, spec) - - @_require_version_compare - def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: - - # NB: Local version identifiers are NOT permitted in the version - # specifier, so local version labels can be universally removed from - # the prospective version. - return Version(prospective.public) <= Version(spec) - - @_require_version_compare - def _compare_greater_than_equal( - self, prospective: ParsedVersion, spec: str - ) -> bool: - - # NB: Local version identifiers are NOT permitted in the version - # specifier, so local version labels can be universally removed from - # the prospective version. - return Version(prospective.public) >= Version(spec) - - @_require_version_compare - def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is less than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective < spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a pre-release version, that we do not accept pre-release - # versions for the version mentioned in the specifier (e.g. <3.1 should - # not match 3.1.dev0, but should match 3.0.dev0). - if not spec.is_prerelease and prospective.is_prerelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # less than the spec version *and* it's not a pre-release of the same - # version in the spec. - return True - - @_require_version_compare - def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is greater than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. 
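The comparison methods above encode the PEP 440 operators; a small sketch of
the compatible-release and prefix-matching behaviour they implement (version
numbers are illustrative):

.. code:: python

    from packaging.specifiers import Specifier

    # ~=2.2 behaves like >=2.2,==2.* (see _compare_compatible).
    compat = Specifier("~=2.2")
    print(compat.contains("2.5"))     # True
    print(compat.contains("3.0"))     # False

    # ==2.2.* does zero-padded prefix matching (see _compare_equal).
    prefix = Specifier("==2.2.*")
    print(prefix.contains("2.2.10"))  # True
    print(prefix.contains("2.3"))     # False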
- if not prospective > spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a post-release version, that we do not accept - # post-release versions for the version mentioned in the specifier - # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). - if not spec.is_postrelease and prospective.is_postrelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is technically greater than, to match. - if prospective.local is not None: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # greater than the spec version *and* it's not a pre-release of the - # same version in the spec. - return True - - def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: - return str(prospective).lower() == str(spec).lower() - - @property - def prereleases(self) -> bool: - - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases - - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. - if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True - - return False - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - -_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") - - -def _version_split(version: str) -> List[str]: - result: List[str] = [] - for item in version.split("."): - match = _prefix_regex.search(item) - if match: - result.extend(match.groups()) - else: - result.append(item) - return result - - -def _is_not_suffix(segment: str) -> bool: - return not any( - segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") - ) - - -def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: - left_split, right_split = [], [] - - # Get the release segment of our versions - left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) - right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) - - # Get the rest of our versions - left_split.append(left[len(left_split[0]) :]) - right_split.append(right[len(right_split[0]) :]) - - # Insert our padding - left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) - right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) - - return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) - - -class SpecifierSet(BaseSpecifier): - def __init__( - self, specifiers: str = "", prereleases: Optional[bool] = None - ) -> None: - - # Split on , to break each individual specifier into it's own item, and - # strip each item to remove leading/trailing whitespace. 
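``SpecifierSet``, whose constructor begins above, treats the comma as a
logical AND and supports intersection with ``&``; for example (values are
illustrative):

.. code:: python

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1.0,<2.0")
    print("1.5" in spec)   # True: every clause must match
    print("2.1" in spec)   # False

    # Intersection accepts another set or a raw specifier string.
    print(spec & ">=1.2")  # <2.0,>=1.0,>=1.2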
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - - # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed: Set[_IndividualSpecifier] = set() - for specifier in split_specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) - - # Store our prereleases value so we can use it later to determine if - # we accept prereleases or not. - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return f"" - - def __str__(self) -> str: - return ",".join(sorted(str(s) for s in self._specs)) - - def __hash__(self) -> int: - return hash(self._specs) - - def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": - if isinstance(other, str): - other = SpecifierSet(other) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - specifier = SpecifierSet() - specifier._specs = frozenset(self._specs | other._specs) - - if self._prereleases is None and other._prereleases is not None: - specifier._prereleases = other._prereleases - elif self._prereleases is not None and other._prereleases is None: - specifier._prereleases = self._prereleases - elif self._prereleases == other._prereleases: - specifier._prereleases = self._prereleases - else: - raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." - ) - - return specifier - - def __eq__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __len__(self) -> int: - return len(self._specs) - - def __iter__(self) -> Iterator[_IndividualSpecifier]: - return iter(self._specs) - - @property - def prereleases(self) -> Optional[bool]: - - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: UnparsedVersion) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them. 
If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all(s.contains(item, prereleases=prereleases) for s in self._specs) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. - if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. - else: - filtered: List[VersionTypeVar] = [] - found_prereleases: List[VersionTypeVar] = [] - - item: UnparsedVersion - parsed_version: Union[Version, LegacyVersion] - - for item in iterable: - # Ensure that we some kind of Version class for this item. - if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue - - # Store any item which is a pre-release for later unless we've - # already found a final version or we are accepting prereleases - if parsed_version.is_prerelease and not prereleases: - if not filtered: - found_prereleases.append(item) - else: - filtered.append(item) - - # If we've found no items except for pre-releases, then we'll go - # ahead and use the pre-releases - if not filtered and found_prereleases and prereleases is None: - return found_prereleases - - return filtered diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/tags.py b/.eggs/packaging-21.3-py3.8.egg/packaging/tags.py deleted file mode 100644 index 9a3d25a71..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/tags.py +++ /dev/null @@ -1,487 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import logging -import platform -import sys -import sysconfig -from importlib.machinery import EXTENSION_SUFFIXES -from typing import ( - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, - cast, -) - -from . import _manylinux, _musllinux - -logger = logging.getLogger(__name__) - -PythonVersion = Sequence[int] -MacVersion = Tuple[int, int] - -INTERPRETER_SHORT_NAMES: Dict[str, str] = { - "python": "py", # Generic. 
- "cpython": "cp", - "pypy": "pp", - "ironpython": "ip", - "jython": "jy", -} - - -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 - - -class Tag: - """ - A representation of the tag triple for a wheel. - - Instances are considered immutable and thus are hashable. Equality checking - is also supported. - """ - - __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] - - def __init__(self, interpreter: str, abi: str, platform: str) -> None: - self._interpreter = interpreter.lower() - self._abi = abi.lower() - self._platform = platform.lower() - # The __hash__ of every single element in a Set[Tag] will be evaluated each time - # that a set calls its `.disjoint()` method, which may be called hundreds of - # times when scanning a page of links for packages with tags matching that - # Set[Tag]. Pre-computing the value here produces significant speedups for - # downstream consumers. - self._hash = hash((self._interpreter, self._abi, self._platform)) - - @property - def interpreter(self) -> str: - return self._interpreter - - @property - def abi(self) -> str: - return self._abi - - @property - def platform(self) -> str: - return self._platform - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Tag): - return NotImplemented - - return ( - (self._hash == other._hash) # Short-circuit ASAP for perf reasons. - and (self._platform == other._platform) - and (self._abi == other._abi) - and (self._interpreter == other._interpreter) - ) - - def __hash__(self) -> int: - return self._hash - - def __str__(self) -> str: - return f"{self._interpreter}-{self._abi}-{self._platform}" - - def __repr__(self) -> str: - return f"<{self} @ {id(self)}>" - - -def parse_tag(tag: str) -> FrozenSet[Tag]: - """ - Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. - - Returning a set is required due to the possibility that the tag is a - compressed tag set. - """ - tags = set() - interpreters, abis, platforms = tag.split("-") - for interpreter in interpreters.split("."): - for abi in abis.split("."): - for platform_ in platforms.split("."): - tags.add(Tag(interpreter, abi, platform_)) - return frozenset(tags) - - -def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: - value = sysconfig.get_config_var(name) - if value is None and warn: - logger.debug( - "Config variable '%s' is unset, Python ABI tag may be incorrect", name - ) - return value - - -def _normalize_string(string: str) -> str: - return string.replace(".", "_").replace("-", "_") - - -def _abi3_applies(python_version: PythonVersion) -> bool: - """ - Determine if the Python version supports abi3. - - PEP 384 was first implemented in Python 3.2. - """ - return len(python_version) > 1 and tuple(python_version) >= (3, 2) - - -def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: - py_version = tuple(py_version) # To allow for version comparison. - abis = [] - version = _version_nodot(py_version[:2]) - debug = pymalloc = ucs4 = "" - with_debug = _get_config_var("Py_DEBUG", warn) - has_refcount = hasattr(sys, "gettotalrefcount") - # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled - # extension modules is the best option. 
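``Tag`` and ``parse_tag`` above model wheel compatibility tags, including
compressed tag sets; for illustration:

.. code:: python

    from packaging.tags import Tag, parse_tag

    # A compressed tag set expands into one Tag per combination.
    tags = parse_tag("py2.py3-none-any")
    print(sorted(str(t) for t in tags))  # ['py2-none-any', 'py3-none-any']

    # Tags hash and compare by (interpreter, abi, platform).
    print(Tag("py3", "none", "any") in tags)  # True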
- # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 - has_ext = "_d.pyd" in EXTENSION_SUFFIXES - if with_debug or (with_debug is None and (has_refcount or has_ext)): - debug = "d" - if py_version < (3, 8): - with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) - if with_pymalloc or with_pymalloc is None: - pymalloc = "m" - if py_version < (3, 3): - unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) - if unicode_size == 4 or ( - unicode_size is None and sys.maxunicode == 0x10FFFF - ): - ucs4 = "u" - elif debug: - # Debug builds can also load "normal" extension modules. - # We can also assume no UCS-4 or pymalloc requirement. - abis.append(f"cp{version}") - abis.insert( - 0, - "cp{version}{debug}{pymalloc}{ucs4}".format( - version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 - ), - ) - return abis - - -def cpython_tags( - python_version: Optional[PythonVersion] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a CPython interpreter. - - The tags consist of: - - cp-- - - cp-abi3- - - cp-none- - - cp-abi3- # Older Python versions down to 3.2. - - If python_version only specifies a major version then user-provided ABIs and - the 'none' ABItag will be used. - - If 'abi3' or 'none' are specified in 'abis' then they will be yielded at - their normal position and not at the beginning. - """ - if not python_version: - python_version = sys.version_info[:2] - - interpreter = f"cp{_version_nodot(python_version[:2])}" - - if abis is None: - if len(python_version) > 1: - abis = _cpython_abis(python_version, warn) - else: - abis = [] - abis = list(abis) - # 'abi3' and 'none' are explicitly handled later. - for explicit_abi in ("abi3", "none"): - try: - abis.remove(explicit_abi) - except ValueError: - pass - - platforms = list(platforms or platform_tags()) - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - if _abi3_applies(python_version): - yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) - yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) - - if _abi3_applies(python_version): - for minor_version in range(python_version[1] - 1, 1, -1): - for platform_ in platforms: - interpreter = "cp{version}".format( - version=_version_nodot((python_version[0], minor_version)) - ) - yield Tag(interpreter, "abi3", platform_) - - -def _generic_abi() -> Iterator[str]: - abi = sysconfig.get_config_var("SOABI") - if abi: - yield _normalize_string(abi) - - -def generic_tags( - interpreter: Optional[str] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a generic interpreter. - - The tags consist of: - - -- - - The "none" ABI will be added if it was not explicitly provided. - """ - if not interpreter: - interp_name = interpreter_name() - interp_version = interpreter_version(warn=warn) - interpreter = "".join([interp_name, interp_version]) - if abis is None: - abis = _generic_abi() - platforms = list(platforms or platform_tags()) - abis = list(abis) - if "none" not in abis: - abis.append("none") - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - - -def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: - """ - Yields Python versions in descending order. 
- - After the latest version, the major-only version will be yielded, and then - all previous versions of that major version. - """ - if len(py_version) > 1: - yield f"py{_version_nodot(py_version[:2])}" - yield f"py{py_version[0]}" - if len(py_version) > 1: - for minor in range(py_version[1] - 1, -1, -1): - yield f"py{_version_nodot((py_version[0], minor))}" - - -def compatible_tags( - python_version: Optional[PythonVersion] = None, - interpreter: Optional[str] = None, - platforms: Optional[Iterable[str]] = None, -) -> Iterator[Tag]: - """ - Yields the sequence of tags that are compatible with a specific version of Python. - - The tags consist of: - - py*-none- - - -none-any # ... if `interpreter` is provided. - - py*-none-any - """ - if not python_version: - python_version = sys.version_info[:2] - platforms = list(platforms or platform_tags()) - for version in _py_interpreter_range(python_version): - for platform_ in platforms: - yield Tag(version, "none", platform_) - if interpreter: - yield Tag(interpreter, "none", "any") - for version in _py_interpreter_range(python_version): - yield Tag(version, "none", "any") - - -def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: - if not is_32bit: - return arch - - if arch.startswith("ppc"): - return "ppc" - - return "i386" - - -def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: - formats = [cpu_arch] - if cpu_arch == "x86_64": - if version < (10, 4): - return [] - formats.extend(["intel", "fat64", "fat32"]) - - elif cpu_arch == "i386": - if version < (10, 4): - return [] - formats.extend(["intel", "fat32", "fat"]) - - elif cpu_arch == "ppc64": - # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? - if version > (10, 5) or version < (10, 4): - return [] - formats.append("fat64") - - elif cpu_arch == "ppc": - if version > (10, 6): - return [] - formats.extend(["fat32", "fat"]) - - if cpu_arch in {"arm64", "x86_64"}: - formats.append("universal2") - - if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: - formats.append("universal") - - return formats - - -def mac_platforms( - version: Optional[MacVersion] = None, arch: Optional[str] = None -) -> Iterator[str]: - """ - Yields the platform tags for a macOS system. - - The `version` parameter is a two-item tuple specifying the macOS version to - generate platform tags for. The `arch` parameter is the CPU architecture to - generate platform tags for. Both parameters default to the appropriate value - for the current system. - """ - version_str, _, cpu_arch = platform.mac_ver() - if version is None: - version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) - else: - version = version - if arch is None: - arch = _mac_arch(cpu_arch) - else: - arch = arch - - if (10, 0) <= version and version < (11, 0): - # Prior to Mac OS 11, each yearly release of Mac OS bumped the - # "minor" version number. The major version was always 10. - for minor_version in range(version[1], -1, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=10, minor=minor_version, binary_format=binary_format - ) - - if version >= (11, 0): - # Starting with Mac OS 11, each yearly release bumps the major version - # number. The minor versions are now the midyear updates. 
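``mac_platforms`` above walks from the given release down through the
compatible binary formats; a quick sketch with explicit, hypothetical
arguments (output abbreviated):

.. code:: python

    import itertools

    from packaging.tags import mac_platforms

    for tag in itertools.islice(mac_platforms((10, 15), "x86_64"), 4):
        print(tag)
    # macosx_10_15_x86_64
    # macosx_10_15_intel
    # macosx_10_15_fat64
    # macosx_10_15_fat32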
- for major_version in range(version[0], 10, -1): - compat_version = major_version, 0 - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=major_version, minor=0, binary_format=binary_format - ) - - if version >= (11, 0): - # Mac OS 11 on x86_64 is compatible with binaries from previous releases. - # Arm64 support was introduced in 11.0, so no Arm binaries from previous - # releases exist. - # - # However, the "universal2" binary format can have a - # macOS version earlier than 11.0 when the x86_64 part of the binary supports - # that version of macOS. - if arch == "x86_64": - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - else: - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_format = "universal2" - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - - -def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: - linux = _normalize_string(sysconfig.get_platform()) - if is_32bit: - if linux == "linux_x86_64": - linux = "linux_i686" - elif linux == "linux_aarch64": - linux = "linux_armv7l" - _, arch = linux.split("_", 1) - yield from _manylinux.platform_tags(linux, arch) - yield from _musllinux.platform_tags(arch) - yield linux - - -def _generic_platforms() -> Iterator[str]: - yield _normalize_string(sysconfig.get_platform()) - - -def platform_tags() -> Iterator[str]: - """ - Provides the platform tags for this installation. - """ - if platform.system() == "Darwin": - return mac_platforms() - elif platform.system() == "Linux": - return _linux_platforms() - else: - return _generic_platforms() - - -def interpreter_name() -> str: - """ - Returns the name of the running interpreter. - """ - name = sys.implementation.name - return INTERPRETER_SHORT_NAMES.get(name) or name - - -def interpreter_version(*, warn: bool = False) -> str: - """ - Returns the version of the running interpreter. - """ - version = _get_config_var("py_version_nodot", warn=warn) - if version: - version = str(version) - else: - version = _version_nodot(sys.version_info[:2]) - return version - - -def _version_nodot(version: PythonVersion) -> str: - return "".join(map(str, version)) - - -def sys_tags(*, warn: bool = False) -> Iterator[Tag]: - """ - Returns the sequence of tag triples for the running interpreter. - - The order of the sequence corresponds to priority order for the - interpreter, from most to least important. - """ - - interp_name = interpreter_name() - if interp_name == "cp": - yield from cpython_tags(warn=warn) - else: - yield from generic_tags() - - if interp_name == "pp": - yield from compatible_tags(interpreter="pp3") - else: - yield from compatible_tags() diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/utils.py b/.eggs/packaging-21.3-py3.8.egg/packaging/utils.py deleted file mode 100644 index bab11b80c..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/utils.py +++ /dev/null @@ -1,136 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository -# for complete details. - -import re -from typing import FrozenSet, NewType, Tuple, Union, cast - -from .tags import Tag, parse_tag -from .version import InvalidVersion, Version - -BuildTag = Union[Tuple[()], Tuple[int, str]] -NormalizedName = NewType("NormalizedName", str) - - -class InvalidWheelFilename(ValueError): - """ - An invalid wheel filename was found, users should refer to PEP 427. - """ - - -class InvalidSdistFilename(ValueError): - """ - An invalid sdist filename was found, users should refer to the packaging user guide. - """ - - -_canonicalize_regex = re.compile(r"[-_.]+") -# PEP 427: The build number must start with a digit. -_build_tag_regex = re.compile(r"(\d+)(.*)") - - -def canonicalize_name(name: str) -> NormalizedName: - # This is taken from PEP 503. - value = _canonicalize_regex.sub("-", name).lower() - return cast(NormalizedName, value) - - -def canonicalize_version(version: Union[Version, str]) -> str: - """ - This is very similar to Version.__str__, but has one subtle difference - with the way it handles the release segment. - """ - if isinstance(version, str): - try: - parsed = Version(version) - except InvalidVersion: - # Legacy versions cannot be normalized - return version - else: - parsed = version - - parts = [] - - # Epoch - if parsed.epoch != 0: - parts.append(f"{parsed.epoch}!") - - # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) - - # Pre-release - if parsed.pre is not None: - parts.append("".join(str(x) for x in parsed.pre)) - - # Post-release - if parsed.post is not None: - parts.append(f".post{parsed.post}") - - # Development release - if parsed.dev is not None: - parts.append(f".dev{parsed.dev}") - - # Local version segment - if parsed.local is not None: - parts.append(f"+{parsed.local}") - - return "".join(parts) - - -def parse_wheel_filename( - filename: str, -) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: - if not filename.endswith(".whl"): - raise InvalidWheelFilename( - f"Invalid wheel filename (extension must be '.whl'): {filename}" - ) - - filename = filename[:-4] - dashes = filename.count("-") - if dashes not in (4, 5): - raise InvalidWheelFilename( - f"Invalid wheel filename (wrong number of parts): {filename}" - ) - - parts = filename.split("-", dashes - 2) - name_part = parts[0] - # See PEP 427 for the rules on escaping the project name - if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename(f"Invalid project name: {filename}") - name = canonicalize_name(name_part) - version = Version(parts[1]) - if dashes == 5: - build_part = parts[2] - build_match = _build_tag_regex.match(build_part) - if build_match is None: - raise InvalidWheelFilename( - f"Invalid build number: {build_part} in '{filename}'" - ) - build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) - else: - build = () - tags = parse_tag(parts[-1]) - return (name, version, build, tags) - - -def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: - if filename.endswith(".tar.gz"): - file_stem = filename[: -len(".tar.gz")] - elif filename.endswith(".zip"): - file_stem = filename[: -len(".zip")] - else: - raise InvalidSdistFilename( - f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" - f" {filename}" - ) - - # We are requiring a PEP 440 version, which cannot contain dashes, - # so we split on the last dash. 
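The helpers above normalize project names, versions and filenames; for
illustration (filenames are hypothetical):

.. code:: python

    from packaging.utils import (
        canonicalize_name, canonicalize_version, parse_wheel_filename,
    )

    print(canonicalize_name("Stone_Soup"))  # stone-soup (PEP 503)
    print(canonicalize_version("1.0.0"))    # 1 (trailing zeros stripped)

    name, version, build, tags = parse_wheel_filename(
        "pip-21.3-py3-none-any.whl")
    print(name, version, build)             # pip 21.3 ()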
- name_part, sep, version_part = file_stem.rpartition("-") - if not sep: - raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") - - name = canonicalize_name(name_part) - version = Version(version_part) - return (name, version) diff --git a/.eggs/packaging-21.3-py3.8.egg/packaging/version.py b/.eggs/packaging-21.3-py3.8.egg/packaging/version.py deleted file mode 100644 index de9a09a4e..000000000 --- a/.eggs/packaging-21.3-py3.8.egg/packaging/version.py +++ /dev/null @@ -1,504 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import collections -import itertools -import re -import warnings -from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union - -from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType - -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] - -InfiniteTypes = Union[InfinityType, NegativeInfinityType] -PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] -SubLocalType = Union[InfiniteTypes, int, str] -LocalType = Union[ - NegativeInfinityType, - Tuple[ - Union[ - SubLocalType, - Tuple[SubLocalType, str], - Tuple[NegativeInfinityType, SubLocalType], - ], - ..., - ], -] -CmpKey = Tuple[ - int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType -] -LegacyCmpKey = Tuple[int, Tuple[str, ...]] -VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool -] - -_Version = collections.namedtuple( - "_Version", ["epoch", "release", "dev", "pre", "post", "local"] -) - - -def parse(version: str) -> Union["LegacyVersion", "Version"]: - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. - """ - - -class _BaseVersion: - _key: Union[CmpKey, LegacyCmpKey] - - def __hash__(self) -> int: - return hash(self._key) - - # Please keep the duplicated `isinstance` check - # in the six comparisons hereunder - # unless you find a way to avoid adding overhead function calls. 
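``parse`` above falls back to ``LegacyVersion`` for anything that is not
valid PEP 440; for example:

.. code:: python

    from packaging.version import parse

    print(type(parse("1.0.post1")).__name__)      # Version
    print(type(parse("not.a/version")).__name__)  # LegacyVersion (deprecated)

    # Legacy versions use epoch -1, so they sort before all PEP 440 ones.
    print(parse("not.a/version") < parse("0.0.1"))  # True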
- def __lt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key < other._key - - def __le__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key <= other._key - - def __eq__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key == other._key - - def __ge__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key >= other._key - - def __gt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key > other._key - - def __ne__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key != other._key - - -class LegacyVersion(_BaseVersion): - def __init__(self, version: str) -> None: - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def __str__(self) -> str: - return self._version - - def __repr__(self) -> str: - return f"" - - @property - def public(self) -> str: - return self._version - - @property - def base_version(self) -> str: - return self._version - - @property - def epoch(self) -> int: - return -1 - - @property - def release(self) -> None: - return None - - @property - def pre(self) -> None: - return None - - @property - def post(self) -> None: - return None - - @property - def dev(self) -> None: - return None - - @property - def local(self) -> None: - return None - - @property - def is_prerelease(self) -> bool: - return False - - @property - def is_postrelease(self) -> bool: - return False - - @property - def is_devrelease(self) -> bool: - return False - - -_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) - -_legacy_version_replacement_map = { - "pre": "c", - "preview": "c", - "-": "final-", - "rc": "c", - "dev": "@", -} - - -def _parse_version_parts(s: str) -> Iterator[str]: - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version: str) -> LegacyCmpKey: - - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts: List[str] = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - - return epoch, tuple(parts) - - -# Deliberately not anchored to the start and end of the string, to make it -# easier for 3rd party code to reuse -VERSION_PATTERN = r""" - v? 
-    (?:
-        (?:(?P<epoch>[0-9]+)!)?                           # epoch
-        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
-        (?P<pre>                                          # pre-release
-            [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\.]?
-            (?P<pre_n>[0-9]+)?
-        )?
-        (?P<post>                                         # post release
-            (?:-(?P<post_n1>[0-9]+))
-            |
-            (?:
-                [-_\.]?
-                (?P<post_l>post|rev|r)
-                [-_\.]?
-                (?P<post_n2>[0-9]+)?
-            )
-        )?
-        (?P<dev>                                          # dev release
-            [-_\.]?
-            (?P<dev_l>dev)
-            [-_\.]?
-            (?P<dev_n>[0-9]+)?
-        )?
-    )
-    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
-"""
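The named groups in this pattern feed ``Version.__init__`` below; they can
also be probed directly (the version string is illustrative):

.. code:: python

    import re

    from packaging.version import VERSION_PATTERN

    regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$",
                       re.VERBOSE | re.IGNORECASE)
    match = regex.match("1!2.0rc1.post3.dev4+ubuntu.1")
    print(match.group("epoch"), match.group("release"))  # 1 2.0
    print(match.group("pre_l"), match.group("pre_n"))    # rc 1
    print(match.group("local"))                          # ubuntu.1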
-
-
-class Version(_BaseVersion):
-
-    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    def __init__(self, version: str) -> None:
-
-        # Validate the version and parse it into pieces
-        match = self._regex.search(version)
-        if not match:
-            raise InvalidVersion(f"Invalid version: '{version}'")
-
-        # Store the parsed out pieces of the version
-        self._version = _Version(
-            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
-            release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
-            post=_parse_letter_version(
-                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
-            ),
-            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
-            local=_parse_local_version(match.group("local")),
-        )
-
-        # Generate a key which will be used for sorting
-        self._key = _cmpkey(
-            self._version.epoch,
-            self._version.release,
-            self._version.pre,
-            self._version.post,
-            self._version.dev,
-            self._version.local,
-        )
-
-    def __repr__(self) -> str:
-        return f"<Version('{self}')>"
-
-    def __str__(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        # Pre-release
-        if self.pre is not None:
-            parts.append("".join(str(x) for x in self.pre))
-
-        # Post-release
-        if self.post is not None:
-            parts.append(f".post{self.post}")
-
-        # Development release
-        if self.dev is not None:
-            parts.append(f".dev{self.dev}")
-
-        # Local version segment
-        if self.local is not None:
-            parts.append(f"+{self.local}")
-
-        return "".join(parts)
-
-    @property
-    def epoch(self) -> int:
-        _epoch: int = self._version.epoch
-        return _epoch
-
-    @property
-    def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
-        return _release
-
-    @property
-    def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
-
-    @property
-    def post(self) -> Optional[int]:
-        return self._version.post[1] if self._version.post else None
-
-    @property
-    def dev(self) -> Optional[int]:
-        return self._version.dev[1] if self._version.dev else None
-
-    @property
-    def local(self) -> Optional[str]:
-        if self._version.local:
-            return ".".join(str(x) for x in self._version.local)
-        else:
-            return None
-
-    @property
-    def public(self) -> str:
-        return str(self).split("+", 1)[0]
-
-    @property
-    def base_version(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        return "".join(parts)
-
-    @property
-    def is_prerelease(self) -> bool:
-        return self.dev is not None or self.pre is not None
-
-    @property
-    def is_postrelease(self) -> bool:
-        return self.post is not None
-
-    @property
-    def is_devrelease(self) -> bool:
-        return self.dev is not None
-
-    @property
-    def major(self) -> int:
-        return self.release[0] if len(self.release) >= 1 else 0
-
-    @property
-    def minor(self) -> int:
-        return self.release[1] if len(self.release) >= 2 else 0
-
-    @property
-    def micro(self) -> int:
-        return self.release[2] if len(self.release) >= 3 else 0
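The properties above decompose a parsed version; for instance (the version
string is illustrative):

.. code:: python

    from packaging.version import Version

    v = Version("1!1.2.3rc1.post2.dev3+local.7")
    print(v.epoch, v.release)         # 1 (1, 2, 3)
    print(v.pre, v.post, v.dev)       # ('rc', 1) 2 3
    print(v.local, v.base_version)    # local.7 1!1.2.3
    print(v.public)                   # 1!1.2.3rc1.post2.dev3
    print(v.major, v.minor, v.micro)  # 1 2 3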
-
-
-def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
-    if letter:
-        # We consider there to be an implicit 0 in a pre-release if there is
-        # not a numeral associated with it.
-        if number is None:
-            number = 0
-
-        # We normalize any letters to their lower case form
-        letter = letter.lower()
-
-        # We consider some words to be alternate spellings of other words and
-        # in those cases we want to normalize the spellings to our preferred
-        # spelling.
-        if letter == "alpha":
-            letter = "a"
-        elif letter == "beta":
-            letter = "b"
-        elif letter in ["c", "pre", "preview"]:
-            letter = "rc"
-        elif letter in ["rev", "r"]:
-            letter = "post"
-
-        return letter, int(number)
-    if not letter and number:
-        # We assume if we are given a number, but we are not given a letter
-        # then this is using the implicit post release syntax (e.g. 1.0-1)
-        letter = "post"
-
-        return letter, int(number)
-
-    return None
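Because ``_parse_letter_version`` normalizes alternate spellings, versions
written differently compare equal; for example:

.. code:: python

    from packaging.version import Version

    print(Version("1.0alpha1") == Version("1.0a1"))     # True
    print(Version("1.0-rev2") == Version("1.0.post2"))  # True
    print(str(Version("1.0-1")))                        # 1.0.post1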
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
-    """
-    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
-    """
-    if local is not None:
-        return tuple(
-            part.lower() if not part.isdigit() else int(part)
-            for part in _local_version_separators.split(local)
-        )
-    return None
-
-
-def _cmpkey(
-    epoch: int,
-    release: Tuple[int, ...],
-    pre: Optional[Tuple[str, int]],
-    post: Optional[Tuple[str, int]],
-    dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
-    # zeros until we come to something non-zero, then re-reverse it back into the
-    # correct order, make it a tuple, and use that for our sorting key.
-    _release = tuple(
-        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
-    )
-
-    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
-    # We'll do this by abusing the pre segment, but we _only_ want to do this
-    # if there is not a pre or a post segment. If we have one of those then
-    # the normal sorting rules will handle this case correctly.
-    if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
-    # Versions without a pre-release (except as noted above) should sort after
-    # those with one.
-    elif pre is None:
-        _pre = Infinity
-    else:
-        _pre = pre
-
-    # Versions without a post segment should sort before those with one.
-    if post is None:
-        _post: PrePostDevType = NegativeInfinity
-
-    else:
-        _post = post
-
-    # Versions without a development segment should sort after those with one.
-    if dev is None:
-        _dev: PrePostDevType = Infinity
-
-    else:
-        _dev = dev
-
-    if local is None:
-        # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
-    else:
-        # Versions with a local segment need that segment parsed to implement
-        # the sorting rules in PEP440.
-        # - Alpha numeric segments sort before numeric segments
-        # - Alpha numeric segments sort lexicographically
-        # - Numeric segments sort numerically
-        # - Shorter versions sort before longer versions when the prefixes
-        #   match exactly
-        _local = tuple(
-            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
-        )
-
-    return epoch, _release, _pre, _post, _dev, _local
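Taken together, these rules give ``_cmpkey`` the PEP 440 total ordering; for
example:

.. code:: python

    from packaging.version import Version

    versions = ["1.1", "1.0", "1.0.post1", "1.0a1", "1.0.dev0"]
    print([str(v) for v in sorted(Version(s) for s in versions)])
    # ['1.0.dev0', '1.0a1', '1.0', '1.0.post1', '1.1']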
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE
deleted file mode 100644
index 1bf98523e..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO
deleted file mode 100644
index bdfed0b61..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/PKG-INFO
+++ /dev/null
@@ -1,109 +0,0 @@
-Metadata-Version: 2.1
-Name: pyparsing
-Version: 3.0.6
-Summary: Python parsing module
-Home-page: https://github.com/pyparsing/pyparsing/
-Author: Paul McGuire
-Author-email: ptmcg.gm+pyparsing@gmail.com
-License: MIT License
-Download-URL: https://pypi.org/project/pyparsing/
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: Information Technology
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Python: >=3.6
-Description-Content-Type: text/x-rst
-Provides-Extra: diagrams
-Requires-Dist: jinja2 ; extra == 'diagrams'
-Requires-Dist: railroad-diagrams ; extra == 'diagrams'
-
-PyParsing -- A Python Parsing Module
-====================================
-
-|Build Status| |Coverage|
-
-Introduction
-============
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions. The pyparsing module provides a library of
-classes that client code uses to construct the grammar directly in
-Python code.
-
-*[Since first writing this description of pyparsing in late 2003, this
-technique for developing parsers has become more widespread, under the
-name Parsing Expression Grammars - PEGs. See more information on PEGs*
-`here <https://en.wikipedia.org/wiki/Parsing_expression_grammar>`__
-*.]*
-
-Here is a program to parse ``"Hello, World!"`` (or any greeting of the form
-``"salutation, addressee!"``):
-
-.. code:: python
-
-    from pyparsing import Word, alphas
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-    hello = "Hello, World!"
-    print(hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of '+', '|' and '^' operator
-definitions.
-
-The parsed results returned from ``parseString()`` form a collection of type
-``ParseResults``, which can be accessed as a
-nested list, a dictionary, or an object with named attributes.
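A short sketch of both points (building the grammar from operators, and the three
views onto a ``ParseResults``), using illustrative results names that are not part
of the README example::

    from pyparsing import Word, alphas

    greet = Word(alphas)("salutation") + "," + Word(alphas)("addressee") + "!"
    result = greet.parseString("Hello, World!")

    result.asList()        # nested-list view: ['Hello', ',', 'World', '!']
    result["addressee"]    # dict-style view:  'World'
    result.salutation      # attribute view:   'Hello'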
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
-- extra or missing whitespace (the above program will also handle ``"Hello,World!"``, ``"Hello , World !"``, etc.)
-- quoted strings
-- embedded comments
-
-The examples directory includes a simple SQL parser, simple CORBA IDL
-parser, a config file parser, a chemical formula parser, and a
-four-function algebraic notation parser, among many others.
-
-Documentation
-=============
-
-There are many examples in the online docstrings of the classes
-and methods in pyparsing. You can find them compiled into the
-`online docs <https://pyparsing-docs.readthedocs.io/en/latest/>`__. Additional
-documentation resources and project info are listed in the online
-`GitHub wiki <https://github.com/pyparsing/pyparsing/wiki>`__. An
-entire directory of examples can be found
-`here <https://github.com/pyparsing/pyparsing/tree/master/examples>`__.
-
-License
-=======
-
-MIT License. See header of the ``pyparsing.py`` file.
-
-History
-=======
-
-See the ``CHANGES`` file.
-
-.. |Build Status| image:: https://travis-ci.com/pyparsing/pyparsing.svg?branch=master
-   :target: https://travis-ci.com/pyparsing/pyparsing
-.. |Coverage| image:: https://codecov.io/gh/pyparsing/pyparsing/branch/master/graph/badge.svg
-  :target: https://codecov.io/gh/pyparsing/pyparsing
-
-
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD
deleted file mode 100644
index 133234be2..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/RECORD
+++ /dev/null
@@ -1,17 +0,0 @@
-pyparsing/__init__.py,sha256=3P-TNmKx_H__Ygk98CCZML_ksmPr5ZeSkhigL6RzUPQ,9095
-pyparsing/actions.py,sha256=60v7mETOBzc01YPH_qQD5isavgcSJpAfIKpzgjM3vaU,6429
-pyparsing/common.py,sha256=lFL97ooIeR75CmW5hjURZqwDCTgruqltcTCZ-ulLO2Q,12936
-pyparsing/core.py,sha256=SAChE9VFq7e5SZ0ggo__-HtVFxCrast_xO_todzqBZ4,210727
-pyparsing/exceptions.py,sha256=H4D9gqMavqmAFSsdrU_J6bO-jA-T-A7yvtXWZpooIUA,9030
-pyparsing/helpers.py,sha256=rKkeQ2UExJuBfksZhSZKqME9iXhdGdsl7686_M0nwXE,37881
-pyparsing/results.py,sha256=VLYlrNL_wqsJ1EFDffJzpt4MNyEDqKTgXHnb7eKzQXs,25295
-pyparsing/testing.py,sha256=szs8AKZREZMhL0y0vsMfaTVAnpqPHetg6VKJBNmc4QY,13388
-pyparsing/unicode.py,sha256=0QLjg83PQssSC6dkaZRm9wChE10mDi8kYEO4EvDB8qg,10379
-pyparsing/util.py,sha256=U-juTQjXJ0fqLEX3BBZVBlbAMHrQiGUBMojXYNbnGEM,6734
-pyparsing/diagram/__init__.py,sha256=yySG7RAh6JHuM8xewjaZjY4EWlIc6bX6neHxzTOjuoM,22136
-pyparsing/diagram/template.jinja2,sha256=SfQ8SLktSBqI5W1DGcUVH1vdflRD6x2sQBApxrcNg7s,589
-pyparsing-3.0.6.dist-info/LICENSE,sha256=ENUSChaAWAT_2otojCIL-06POXQbVzIGBNRVowngGXI,1023
-pyparsing-3.0.6.dist-info/METADATA,sha256=16W9SlEjUqdw6HHeEjo4bH_g4_Ti8hYzuf68RRYv5Zg,4169
-pyparsing-3.0.6.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
-pyparsing-3.0.6.dist-info/top_level.txt,sha256=eUOjGzJVhlQ3WS2rFAy2mN3LX_7FKTM5GSJ04jfnLmU,10
-pyparsing-3.0.6.dist-info/RECORD,,
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL
deleted file mode 100644
index 385faab05..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.36.2)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt
deleted file mode 100644
index e185380d6..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/requires.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-
-[diagrams]
-jinja2
-railroad-diagrams
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt
deleted file mode 100644
index 210dfec50..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/EGG-INFO/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-pyparsing
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py
deleted file mode 100644
index 288618fe7..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/__init__.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2021  Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = """
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions.  With pyparsing, you don't need to learn
-a new syntax for defining grammars or matching expressions - the parsing
-module provides a library of classes that you use to construct the
-grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-``", !"``), built up using :class:`Word`,
-:class:`Literal`, and :class:`And` elements
-(the :meth:`'+'` operators create :class:`And` expressions,
-and the strings are auto-converted to :class:`Literal` expressions)::
-
-    from pyparsing import Word, alphas
-
-    # define grammar of a greeting
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-
-    hello = "Hello, World!"
-    print(hello, "->", greet.parse_string(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of :class:`'+'`,
-:class:`'|'`, :class:`'^'` and :class:`'&'` operators.
-
-The :class:`ParseResults` object returned from
-:class:`ParserElement.parseString` can be
-accessed as a nested list, a dictionary, or an object with named
-attributes.
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
-  - extra or missing whitespace (the above program will also handle
-    "Hello,World!", "Hello  ,  World  !", etc.)
-  - quoted strings
-  - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes :class:`ParserElement` and :class:`ParseResults` to
-see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
-
- - construct literal match expressions from :class:`Literal` and
-   :class:`CaselessLiteral` classes
- - construct character word-group expressions using the :class:`Word`
-   class
- - see how to create repetitive expressions using :class:`ZeroOrMore`
-   and :class:`OneOrMore` classes
- - use :class:`'+'`, :class:`'|'`, :class:`'^'`,
-   and :class:`'&'` operators to combine simple expressions into
-   more complex ones
- - associate names with your parsed results using
-   :class:`ParserElement.setResultsName`
- - access the parsed data, which is returned as a :class:`ParseResults`
-   object
- - find some helpful expression short-cuts like :class:`delimitedList`
-   and :class:`oneOf`
- - find more useful common expressions in the :class:`pyparsing_common`
-   namespace class
-"""
-from typing import NamedTuple
-
-
-class version_info(NamedTuple):
-    major: int
-    minor: int
-    micro: int
-    releaselevel: str
-    serial: int
-
-    @property
-    def __version__(self):
-        return "{}.{}.{}".format(self.major, self.minor, self.micro) + (
-            "{}{}{}".format(
-                "r" if self.releaselevel[0] == "c" else "",
-                self.releaselevel[0],
-                self.serial,
-            ),
-            "",
-        )[self.releaselevel == "final"]
-
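The ``(..., "")[self.releaselevel == "final"]`` tail above is a terse conditional:
indexing a two-element tuple with a ``bool`` picks item 0 for ``False`` and item 1
for ``True``, so final releases get an empty suffix. For example::

    ("c1", "")[False]   # -> "c1": pre-release suffix kept
    ("c1", "")[True]    # -> "":   releaselevel == "final"

    # hence version_info(3, 0, 6, "final", 0).__version__     == "3.0.6"
    # and   version_info(3, 0, 6, "candidate", 1).__version__ == "3.0.6rc1"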
-    def __str__(self):
-        return "{} {} / {}".format(__name__, self.__version__, __version_time__)
-
-    def __repr__(self):
-        return "{}.{}({})".format(
-            __name__,
-            type(self).__name__,
-            ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
-        )
-
-
-__version_info__ = version_info(3, 0, 6, "final", 0)
-__version_time__ = "12 Nov 2021 16:06 UTC"
-__version__ = __version_info__.__version__
-__versionTime__ = __version_time__
-__author__ = "Paul McGuire "
-
-from .util import *
-from .exceptions import *
-from .actions import *
-from .core import __diag__, __compat__
-from .results import *
-from .core import *
-from .core import _builtin_exprs as core_builtin_exprs
-from .helpers import *
-from .helpers import _builtin_exprs as helper_builtin_exprs
-
-from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
-from .testing import pyparsing_test as testing
-from .common import (
-    pyparsing_common as common,
-    _builtin_exprs as common_builtin_exprs,
-)
-
-# define backward compat synonyms
-if "pyparsing_unicode" not in globals():
-    pyparsing_unicode = unicode
-if "pyparsing_common" not in globals():
-    pyparsing_common = common
-if "pyparsing_test" not in globals():
-    pyparsing_test = testing
-
-core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
-
-
-__all__ = [
-    "__version__",
-    "__version_time__",
-    "__author__",
-    "__compat__",
-    "__diag__",
-    "And",
-    "AtLineStart",
-    "AtStringStart",
-    "CaselessKeyword",
-    "CaselessLiteral",
-    "CharsNotIn",
-    "Combine",
-    "Dict",
-    "Each",
-    "Empty",
-    "FollowedBy",
-    "Forward",
-    "GoToColumn",
-    "Group",
-    "IndentedBlock",
-    "Keyword",
-    "LineEnd",
-    "LineStart",
-    "Literal",
-    "Located",
-    "PrecededBy",
-    "MatchFirst",
-    "NoMatch",
-    "NotAny",
-    "OneOrMore",
-    "OnlyOnce",
-    "OpAssoc",
-    "Opt",
-    "Optional",
-    "Or",
-    "ParseBaseException",
-    "ParseElementEnhance",
-    "ParseException",
-    "ParseExpression",
-    "ParseFatalException",
-    "ParseResults",
-    "ParseSyntaxException",
-    "ParserElement",
-    "PositionToken",
-    "QuotedString",
-    "RecursiveGrammarException",
-    "Regex",
-    "SkipTo",
-    "StringEnd",
-    "StringStart",
-    "Suppress",
-    "Token",
-    "TokenConverter",
-    "White",
-    "Word",
-    "WordEnd",
-    "WordStart",
-    "ZeroOrMore",
-    "Char",
-    "alphanums",
-    "alphas",
-    "alphas8bit",
-    "any_close_tag",
-    "any_open_tag",
-    "c_style_comment",
-    "col",
-    "common_html_entity",
-    "counted_array",
-    "cpp_style_comment",
-    "dbl_quoted_string",
-    "dbl_slash_comment",
-    "delimited_list",
-    "dict_of",
-    "empty",
-    "hexnums",
-    "html_comment",
-    "identchars",
-    "identbodychars",
-    "java_style_comment",
-    "line",
-    "line_end",
-    "line_start",
-    "lineno",
-    "make_html_tags",
-    "make_xml_tags",
-    "match_only_at_col",
-    "match_previous_expr",
-    "match_previous_literal",
-    "nested_expr",
-    "null_debug_action",
-    "nums",
-    "one_of",
-    "printables",
-    "punc8bit",
-    "python_style_comment",
-    "quoted_string",
-    "remove_quotes",
-    "replace_with",
-    "replace_html_entity",
-    "rest_of_line",
-    "sgl_quoted_string",
-    "srange",
-    "string_end",
-    "string_start",
-    "trace_parse_action",
-    "unicode_string",
-    "with_attribute",
-    "indentedBlock",
-    "original_text_for",
-    "ungroup",
-    "infix_notation",
-    "locatedExpr",
-    "with_class",
-    "CloseMatch",
-    "token_map",
-    "pyparsing_common",
-    "pyparsing_unicode",
-    "unicode_set",
-    "condition_as_parse_action",
-    "pyparsing_test",
-    # pre-PEP8 compatibility names
-    "__versionTime__",
-    "anyCloseTag",
-    "anyOpenTag",
-    "cStyleComment",
-    "commonHTMLEntity",
-    "countedArray",
-    "cppStyleComment",
-    "dblQuotedString",
-    "dblSlashComment",
-    "delimitedList",
-    "dictOf",
-    "htmlComment",
-    "javaStyleComment",
-    "lineEnd",
-    "lineStart",
-    "makeHTMLTags",
-    "makeXMLTags",
-    "matchOnlyAtCol",
-    "matchPreviousExpr",
-    "matchPreviousLiteral",
-    "nestedExpr",
-    "nullDebugAction",
-    "oneOf",
-    "opAssoc",
-    "pythonStyleComment",
-    "quotedString",
-    "removeQuotes",
-    "replaceHTMLEntity",
-    "replaceWith",
-    "restOfLine",
-    "sglQuotedString",
-    "stringEnd",
-    "stringStart",
-    "traceParseAction",
-    "unicodeString",
-    "withAttribute",
-    "indentedBlock",
-    "originalTextFor",
-    "infixNotation",
-    "locatedExpr",
-    "withClass",
-    "tokenMap",
-    "conditionAsParseAction",
-    "autoname_elements",
-]
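The pre-PEP8 compatibility names at the end of ``__all__`` appear to be plain
aliases of the snake_case objects (e.g. ``replaceWith = replace_with`` at the
bottom of ``actions.py`` below), so either spelling refers to the same object::

    import pyparsing as pp

    assert pp.replaceWith is pp.replace_with
    assert pp.removeQuotes is pp.remove_quotes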
diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py
deleted file mode 100644
index 2bcc5502b..000000000
--- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/actions.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# actions.py
-
-from .exceptions import ParseException
-from .util import col
-
-
-class OnlyOnce:
-    """
-    Wrapper for parse actions, to ensure they are only called once.
-    """
-
-    def __init__(self, method_call):
-        from .core import _trim_arity
-
-        self.callable = _trim_arity(method_call)
-        self.called = False
-
-    def __call__(self, s, l, t):
-        if not self.called:
-            results = self.callable(s, l, t)
-            self.called = True
-            return results
-        raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
-
-    def reset(self):
-        """
-        Allow the associated parse action to be called once more.
-        """
-
-        self.called = False
-
-
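A hypothetical usage sketch for ``OnlyOnce`` (the expression and the action are
invented for illustration)::

    from pyparsing import OnlyOnce, ParseException, Word, nums

    pa = OnlyOnce(lambda toks: print("got:", toks))
    num = Word(nums).add_parse_action(pa)

    num.parseString("42")        # the wrapped action fires once
    try:
        num.parseString("43")    # a second firing raises ParseException
    except ParseException:
        pa.reset()               # allow the action to run once more
    num.parseString("44")        # fires again after reset()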
-def match_only_at_col(n):
-    """
-    Helper method for defining parse actions that require matching at
-    a specific column in the input text.
-    """
-
-    def verify_col(strg, locn, toks):
-        if col(locn, strg) != n:
-            raise ParseException(strg, locn, "matched token not at column {}".format(n))
-
-    return verify_col
-
-
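``match_only_at_col`` ships without a docstring example; a hypothetical one,
remembering that pyparsing columns are 1-based::

    from pyparsing import Word, nums, match_only_at_col

    # only accept a number whose match begins in column 5
    col5_num = Word(nums).add_parse_action(match_only_at_col(5))

    col5_num.parseString("    42")   # ok: '42' starts at column 5
    # col5_num.parseString("42")     # would raise ParseException (column 1)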
-def replace_with(repl_str):
-    """
-    Helper method for common parse actions that simply return
-    a literal value. Especially useful when used with
-    :class:`transform_string<ParserElement.transform_string>` ().
-
-    Example::
-
-        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
-        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
-        term = na | num
-
-        OneOrMore(term).parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s, l, t: [repl_str]
-
-
-def remove_quotes(s, l, t):
-    """
-    Helper parse action for removing quotation marks from parsed
-    quoted strings.
-
-    Example::
-
-        # by default, quotation marks are included in parsed results
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use remove_quotes to strip quotation marks from parsed results
-        quoted_string.set_parse_action(remove_quotes)
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-
-def with_attribute(*args, **attr_dict):
-    """
-    Helper to create a validating parse action to be used with start
-    tags created with :class:`make_xml_tags` or
-    :class:`make_html_tags`. Use ``with_attribute`` to qualify
-    a starting tag with a required attribute value, to avoid false
-    matches on common tags such as ``<TD>`` or ``<DIV>``.
-
-    Call ``with_attribute`` with a series of attribute names and
-    values. Specify the list of filter attributes names and values as:
-
-    - keyword arguments, as in ``(align="right")``, or
-    - as an explicit dict with ``**`` operator, when an attribute
-      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
-    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
-    For attribute names with a namespace prefix, you must use the second
-    form. Attribute names are matched insensitive to upper/lower case.
-
-    If just testing for ``class`` (with or without a namespace), use
-    :class:`with_class`.
-
-    To verify that the attribute exists, but without specifying a value,
-    pass ``with_attribute.ANY_VALUE`` as the value.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
-
-        '''
-        div,div_end = make_html_tags("div")
-
-        # only match div tag having a type attribute with value "grid"
-        div_grid = div().set_parse_action(with_attribute(type="grid"))
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.search_string(html):
-            print(grid_header.body)
-
-        # construct a match with any div tag having a type attribute, regardless of the value
-        div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.search_string(html):
-            print(div_header.body)
-
-    prints::
-
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    if args:
-        attrs = args[:]
-    else:
-        attrs = attr_dict.items()
-    attrs = [(k, v) for k, v in attrs]
-
-    def pa(s, l, tokens):
-        for attrName, attrValue in attrs:
-            if attrName not in tokens:
-                raise ParseException(s, l, "no matching attribute " + attrName)
-            if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
-                raise ParseException(
-                    s,
-                    l,
-                    "attribute {!r} has value {!r}, must be {!r}".format(
-                        attrName, tokens[attrName], attrValue
-                    ),
-                )
-
-    return pa
-
-
-with_attribute.ANY_VALUE = object()
-
-
-def with_class(classname, namespace=""):
-    """
-    Simplified version of :class:`with_attribute` when
-    matching on a div class - made difficult because ``class`` is
-    a reserved word in Python.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
- - ''' - div,div_end = make_html_tags("div") - div_grid = div().set_parse_action(with_class("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "{}:class".format(namespace) if namespace else "class" - return with_attribute(**{classattr: classname}) - - -# pre-PEP8 compatibility symbols -replaceWith = replace_with -removeQuotes = remove_quotes -withAttribute = with_attribute -withClass = with_class -matchOnlyAtCol = match_only_at_col diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py deleted file mode 100644 index 1859fb79c..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/common.py +++ /dev/null @@ -1,424 +0,0 @@ -# common.py -from .core import * -from .helpers import delimited_list, any_open_tag, any_close_tag -from datetime import datetime - - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - :class:`url` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convert_to_integer = token_map(int) - """ - Parse action for converting parsed integers to Python int - """ - - convert_to_float = token_map(float) - """ - Parse 
action for converting parsed numbers to Python float - """ - - integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = ( - Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) - ) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = ( - Regex(r"[+-]?\d+") - .set_name("signed integer") - .set_parse_action(convert_to_integer) - ) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = ( - signed_integer().set_parse_action(convert_to_float) - + "/" - + signed_integer().set_parse_action(convert_to_float) - ).set_name("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) - - mixed_integer = ( - fraction | signed_integer + Opt(Opt("-").suppress() + fraction) - ).set_name("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.add_parse_action(sum) - - real = ( - Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") - .set_name("real number") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number and returns a float""" - - sci_real = ( - Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") - .set_name("real number with scientific notation") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).setName("number").streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = ( - Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") - .set_name("fnumber") - .set_parse_action(convert_to_float) - ) - """any int or real number, returned as float""" - - identifier = Word(identchars, identbodychars).set_name("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex( - r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" - ).set_name("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") - _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( - "full IPv6 address" - ) - _short_ipv6_address = ( - Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - + "::" - + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - ).set_name("short IPv6 address") - _short_ipv6_address.add_condition( - lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 - ) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") - ipv6_address = Combine( - (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( - "IPv6 address" - ) - ).set_name("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex( - r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" - ).set_name("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convert_to_date(fmt: str = "%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - - def cvt_fn(ss, ll, tt): - try: - return datetime.strptime(tt[0], fmt).date() - except ValueError as ve: - raise ParseException(ss, ll, str(ve)) - - return cvt_fn - - @staticmethod - def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - - return cvt_fn - - iso8601_date = Regex( - r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" - ).set_name("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex( - r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" - ).set_name("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() - - @staticmethod - def strip_html_tags(s: str, l: int, tokens: ParseResults): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transform_string(tokens[0]) - - _commasepitem = ( - Combine( - OneOrMore( - ~Literal(",") - + ~LineEnd() - + Word(printables, exclude_chars=",") - + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) - ) - ) - .streamline() - .set_name("commaItem") - ) - comma_separated_list = delimited_list( - Opt(quoted_string.copy() | _commasepitem, default="") - ).set_name("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcase_tokens = staticmethod(token_map(lambda t: t.upper())) - """Parse action to convert tokens to upper case.""" - - downcase_tokens = staticmethod(token_map(lambda t: t.lower())) - """Parse action to convert tokens to lower case.""" - - # fmt: off - url = Regex( - # https://mathiasbynens.be/demo/url-regex - # https://gist.github.com/dperini/729294 - r"^" + - # protocol identifier (optional) - # short syntax // still required - r"(?:(?:(?Phttps?|ftp):)?\/\/)" + - # user:pass BasicAuth (optional) - 
r"(?:(?P\S+(?::\S*)?)@)?" + - r"(?P" + - # IP address exclusion - # private & local networks - r"(?!(?:10|127)(?:\.\d{1,3}){3})" + - r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + - r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + - # IP address dotted notation octets - # excludes loopback network 0.0.0.0 - # excludes reserved space >= 224.0.0.0 - # excludes network & broadcast addresses - # (first & last IP address of each class) - r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + - r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + - r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + - r"|" + - # host & domain names, may end with dot - # can be replaced by a shortest alternative - # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ - r"(?:" + - r"(?:" + - r"[a-z0-9\u00a1-\uffff]" + - r"[a-z0-9\u00a1-\uffff_-]{0,62}" + - r")?" + - r"[a-z0-9\u00a1-\uffff]\." + - r")+" + - # TLD identifier name, may end with dot - r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + - r")" + - # port number (optional) - r"(:(?P\d{2,5}))?" + - # resource path (optional) - r"(?P\/[^?# ]*)?" + - # query string (optional) - r"(\?(?P[^#]*))?" + - # fragment (optional) - r"(#(?P\S*))?" + - r"$" - ).set_name("url") - # fmt: on - - # pre-PEP8 compatibility names - convertToInteger = convert_to_integer - convertToFloat = convert_to_float - convertToDate = convert_to_date - convertToDatetime = convert_to_datetime - stripHTMLTags = strip_html_tags - upcaseTokens = upcase_tokens - downcaseTokens = downcase_tokens - - -_builtin_exprs = [ - v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) -] diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py deleted file mode 100644 index ff24eee50..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/core.py +++ /dev/null @@ -1,5772 +0,0 @@ -# -# core.py -# -import os -from typing import ( - Optional as OptionalType, - Iterable as IterableType, - Union, - Callable, - Any, - Generator, - Tuple, - List, - TextIO, - Set, - Dict as DictType, -) -from abc import ABC, abstractmethod -from enum import Enum -import string -import copy -import warnings -import re -import sre_constants -import sys -from collections.abc import Iterable -import traceback -import types -from operator import itemgetter -from functools import wraps -from threading import RLock -from pathlib import Path - -from .util import ( - _FifoCache, - _UnboundedCache, - __config_flags, - _collapse_string_to_ranges, - _escape_regex_range_chars, - _bslash, - _flatten, - LRUMemo as _LRUMemo, - UnboundedMemo as _UnboundedMemo, -) -from .exceptions import * -from .actions import * -from .results import ParseResults, _ParseResultsWithOffset -from .unicode import pyparsing_unicode - -_MAX_INT = sys.maxsize -str_type: Tuple[type, ...] = (str, bytes) - -# -# Copyright (c) 2003-2021 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - - -class __compat__(__config_flags): - """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. - - - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; - maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 - behavior - """ - - _type_desc = "compatibility" - - collect_all_And_tokens = True - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _fixed_names = """ - collect_all_And_tokens - """.split() - - -class __diag__(__config_flags): - _type_desc = "diagnostic" - - warn_multiple_tokens_in_named_alternation = False - warn_ungrouped_named_tokens_in_collection = False - warn_name_set_on_empty_Forward = False - warn_on_parse_using_empty_Forward = False - warn_on_assignment_to_Forward = False - warn_on_multiple_string_args_to_oneof = False - warn_on_match_first_with_lshift_operator = False - enable_debug_on_named_expressions = False - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _warning_names = [name for name in _all_names if name.startswith("warn")] - _debug_names = [name for name in _all_names if name.startswith("enable_debug")] - - @classmethod - def enable_all_warnings(cls): - for name in cls._warning_names: - cls.enable(name) - - -class Diagnostics(Enum): - """ - Diagnostic configuration (all default to disabled) - - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results - name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions - - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined - with a results name, but has no contents defined - - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is - defined in a grammar but has never had an expression attached to it - - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined - but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` - - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is - incorrectly called with multiple str arguments - - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent - calls to :class:`ParserElement.set_name` - - Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. - All warnings can be enabled by calling :class:`enable_all_warnings`. 
- """ - - warn_multiple_tokens_in_named_alternation = 0 - warn_ungrouped_named_tokens_in_collection = 1 - warn_name_set_on_empty_Forward = 2 - warn_on_parse_using_empty_Forward = 3 - warn_on_assignment_to_Forward = 4 - warn_on_multiple_string_args_to_oneof = 5 - warn_on_match_first_with_lshift_operator = 6 - enable_debug_on_named_expressions = 7 - - -def enable_diag(diag_enum): - """ - Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.enable(diag_enum.name) - - -def disable_diag(diag_enum): - """ - Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.disable(diag_enum.name) - - -def enable_all_warnings(): - """ - Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). - """ - __diag__.enable_all_warnings() - - -# hide abstract class -del __config_flags - - -def _should_enable_warnings( - cmd_line_warn_options: List[str], warn_env_var: OptionalType[str] -) -> bool: - enable = bool(warn_env_var) - for warn_opt in cmd_line_warn_options: - w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( - ":" - )[:5] - if not w_action.lower().startswith("i") and ( - not (w_message or w_category or w_module) or w_module == "pyparsing" - ): - enable = True - elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): - enable = False - return enable - - -if _should_enable_warnings( - sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") -): - enable_all_warnings() - - -# build list of single arg builtins, that can be used as parse actions -_single_arg_builtins = { - sum, - len, - sorted, - reversed, - list, - tuple, - set, - any, - all, - min, - max, -} - -_generatorType = types.GeneratorType -ParseAction = Union[ - Callable[[], Any], - Callable[[ParseResults], Any], - Callable[[int, ParseResults], Any], - Callable[[str, int, ParseResults], Any], -] -ParseCondition = Union[ - Callable[[], bool], - Callable[[ParseResults], bool], - Callable[[int, ParseResults], bool], - Callable[[str, int, ParseResults], bool], -] -ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] -DebugStartAction = Callable[[str, int, "ParserElement", bool], None] -DebugSuccessAction = Callable[ - [str, int, int, "ParserElement", ParseResults, bool], None -] -DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] - - -alphas = string.ascii_uppercase + string.ascii_lowercase -identchars = pyparsing_unicode.Latin1.identchars -identbodychars = pyparsing_unicode.Latin1.identbodychars -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -printables = "".join(c for c in string.printable if c not in string.whitespace) - -_trim_arity_call_line = None - - -def _trim_arity(func, maxargs=2): - """decorator to trim function calls to match the arity of the target""" - global _trim_arity_call_line - - if func in _single_arg_builtins: - return lambda s, l, t: func(t) - - limit = 0 - found_arity = False - - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - LINE_DIFF = 11 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
- _trim_arity_call_line = ( - _trim_arity_call_line or traceback.extract_stack(limit=2)[-1] - ) - pa_call_line_synth = ( - _trim_arity_call_line[0], - _trim_arity_call_line[1] + LINE_DIFF, - ) - - def wrapper(*args): - nonlocal found_arity, limit - while 1: - try: - ret = func(*args[limit:]) - found_arity = True - return ret - except TypeError as te: - # re-raise TypeErrors if they did not come from our arity testing - if found_arity: - raise - else: - tb = te.__traceback__ - trim_arity_type_error = ( - extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth - ) - del tb - - if trim_arity_type_error: - if limit <= maxargs: - limit += 1 - continue - - raise - - # copy func name to wrapper for sensible debug output - # (can't use functools.wraps, since that messes with function signature) - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - wrapper.__name__ = func_name - - return wrapper - - -def condition_as_parse_action( - fn: ParseCondition, message: str = None, fatal: bool = False -): - """ - Function to convert a simple predicate function that returns ``True`` or ``False`` - into a parse action. Can be used in places when a parse action is required - and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition - to an operator level in :class:`infix_notation`). - - Optional keyword arguments: - - - ``message`` - define a custom message to be used in the raised exception - - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; - otherwise will raise :class:`ParseException` - - """ - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - - -def _default_start_debug_action( - instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False -): - cache_hit_str = "*" if cache_hit else "" - print( - ( - "{}Match {} at loc {}({},{})\n {}\n {}^".format( - cache_hit_str, - expr, - loc, - lineno(loc, instring), - col(loc, instring), - line(loc, instring), - " " * (col(loc, instring) - 1), - ) - ) - ) - - -def _default_success_debug_action( - instring: str, - startloc: int, - endloc: int, - expr: "ParserElement", - toks: ParseResults, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) - - -def _default_exception_debug_action( - instring: str, - loc: int, - expr: "ParserElement", - exc: Exception, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print( - "{}Match {} failed, {} raised: {}".format( - cache_hit_str, expr, type(exc).__name__, exc - ) - ) - - -def null_debug_action(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - - -class ParserElement(ABC): - """Abstract base level parser element class.""" - - DEFAULT_WHITE_CHARS: str = " \n\t\r" - verbose_stacktrace: bool = False - _literalStringClass: OptionalType[type] = None - - @staticmethod - def set_default_whitespace_chars(chars: str): - r""" - Overrides the default whitespace chars - - Example:: - - # default whitespace chars are space, and newline - OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.set_default_whitespace_chars(" \t") - 
OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - # update whitespace all parse expressions defined in this module - for expr in _builtin_exprs: - if expr.copyDefaultWhiteChars: - expr.whiteChars = set(chars) - - @staticmethod - def inline_literals_using(cls: type): - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inline_literals_using(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - def __init__(self, savelist: bool = False): - self.parseAction: List[ParseAction] = list() - self.failAction: OptionalType[ParseFailAction] = None - self.customName = None - self._defaultName = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - # used when checking for left-recursion - self.mayReturnEmpty = False - self.keepTabs = False - self.ignoreExprs: List["ParserElement"] = list() - self.debug = False - self.streamlined = False - # optimize exception handling for subclasses that don't advance parse index - self.mayIndexError = True - self.errmsg = "" - # mark results names as modal (report only last) or cumulative (list all) - self.modalResults = True - # custom debug actions - self.debugActions: Tuple[ - OptionalType[DebugStartAction], - OptionalType[DebugSuccessAction], - OptionalType[DebugExceptionAction], - ] = (None, None, None) - self.re = None - # avoid redundant calls to preParse - self.callPreparse = True - self.callDuringTry = False - self.suppress_warnings_ = [] - - def suppress_warning(self, warning_type: Diagnostics): - """ - Suppress warnings emitted for a particular diagnostic on this expression. - - Example:: - - base = pp.Forward() - base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) - - # statement would normally raise a warning, but is now suppressed - print(base.parseString("x")) - - """ - self.suppress_warnings_.append(warning_type) - return self - - def copy(self) -> "ParserElement": - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print(OneOrMore(integerK | integerM | integer).parse_string("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - return cpy - - def set_results_name( - self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False - ) -> "ParserElement": - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - - Normally, results names are assigned as you would assign keys in a dict: - any existing value is overwritten by later values. If it is necessary to - keep all values captured for a particular results name, call ``set_results_name`` - with ``list_all_matches`` = True. - - NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.set_results_name("name")`` - - see :class:`__call__`. If ``list_all_matches`` is required, use - ``expr("name*")``. - - Example:: - - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - listAllMatches = listAllMatches or list_all_matches - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - if name is None: - return self - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def set_break(self, break_flag: bool = True) -> "ParserElement": - """ - Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to - disable. - """ - if break_flag: - _parseMethod = self._parse - - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def set_parse_action( - self, *fns: ParseAction, **kwargs - ) -> OptionalType["ParserElement"]: - """ - Define one or more actions to perform when successfully matching parse element definition. - - Parse actions can be called to perform data conversions, do extra validation, - update external data structures, or enhance or replace the parsed tokens. 
- Each parse action ``fn`` is a callable method with 0-3 arguments, called as - ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - The parsed tokens are passed to the parse action as ParseResults. They can be - modified in place using list-style append, extend, and pop operations to update - the parsed list elements; and with dictionary-style item set and del operations - to add, update, or remove any named results. If the tokens are modified in place, - it is not necessary to return them with a return statement. - - Parse actions can also completely replace the given tokens, with another ``ParseResults`` - object, or with some entirely different object (common for parse actions that perform data - conversions). A convenient way to build a new parse result is to define the values - using a dict, and then create the return value using :class:`ParseResults.from_dict`. - - If None is passed as the ``fn`` parse action, all previously added parse actions for this - expression are cleared. - - Optional keyword arguments: - - - call_during_try = (default= ``False``) indicate if parse action should be run during - lookaheads and alternate testing. For parse actions that have side effects, it is - important to only call the parse action once it is determined that it is being - called as part of a successful parse. For parse actions that perform additional - validation, then call_during_try should be passed as True, so that the validation - code is included in the preliminary "try" parses. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`parse_string` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - # parse dates in the form YYYY/MM/DD - - # use parse action to convert toks from str to int at parse time - def convert_to_int(toks): - return int(toks[0]) - - # use a parse action to verify that the date is a valid date - def is_valid_date(toks): - from datetime import date - year, month, day = toks[::2] - try: - date(year, month, day) - except ValueError: - raise ParseException("invalid date given") - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - # add parse actions - integer.set_parse_action(convert_to_int) - date_str.set_parse_action(is_valid_date) - - # note that integer fields are now ints, not strings - date_str.run_tests(''' - # successful parse - note that integer fields were converted to ints - 1999/12/31 - - # fail - invalid date - 1999/13/31 - ''') - """ - if list(fns) == [None]: - self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. - - See examples in :class:`copy`. 
- """ - self.parseAction += list(map(_trim_arity, list(fns))) - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": - """Add a boolean predicate function to expression's list of parse actions. See - :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, - functions passed to ``add_condition`` need to return boolean success/fail of the condition. - - Optional keyword arguments: - - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise - ParseException - - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, - default=False - - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), - (line:1, col:1) - """ - for fn in fns: - self.parseAction.append( - condition_as_parse_action( - fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) - ) - ) - - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": - """ - Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - - The function returns no value. 
It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring, loc): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc, dummy = e._parse(instring, loc) - exprsFound = True - except ParseException: - pass - return loc - - def preParse(self, instring, loc): - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - instrlen = len(instring) - white_chars = self.whiteChars - while loc < instrlen and instring[loc] in white_chars: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # @profile - def _parseNoCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - TRY, MATCH, FAIL = 0, 1, 2 - debugging = self.debug # and doActions) - len_instring = len(instring) - - if debugging or self.failAction: - # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) - try: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.debugActions[TRY]: - self.debugActions[TRY](instring, tokens_start, self) - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except Exception as err: - # print("Exception raised:", err) - if self.debugActions[FAIL]: - self.debugActions[FAIL](instring, tokens_start, self, err) - if self.failAction: - self.failAction(instring, tokens_start, self, err) - raise - else: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - ret_tokens = ParseResults( - tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults - ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - except Exception as err: - # print "Exception raised in user parse action:", err - if self.debugActions[FAIL]: - self.debugActions[FAIL](instring, tokens_start, self, err) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - 
asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - if debugging: - # print("Matched", self, "->", ret_tokens.as_list()) - if self.debugActions[MATCH]: - self.debugActions[MATCH](instring, tokens_start, loc, self, ret_tokens) - - return loc, ret_tokens - - def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int: - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - if raise_fatal: - raise - raise ParseException(instring, loc, self.errmsg, self) - - def can_parse_next(self, instring: str, loc: int) -> bool: - try: - self.try_parse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - # cache for left-recursion in Forward references - recursion_lock = RLock() - recursion_memos: DictType[ - Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] - ] = {} - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = ( - {} - ) # this is set later by enabled_packrat(); this is here so that reset_cache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - HIT, MISS = 0, 1 - TRY, MATCH, FAIL = 0, 1, 2 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy(), loc)) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if self.debug and self.debugActions[TRY]: - try: - self.debugActions[TRY](instring, loc, self, cache_hit=True) - except TypeError: - pass - if isinstance(value, Exception): - if self.debug and self.debugActions[FAIL]: - try: - self.debugActions[FAIL]( - instring, loc, self, value, cache_hit=True - ) - except TypeError: - pass - raise value - - loc_, result, endloc = value[0], value[1].copy(), value[2] - if self.debug and self.debugActions[MATCH]: - try: - self.debugActions[MATCH]( - instring, loc_, endloc, self, result, cache_hit=True - ) - except TypeError: - pass - - return loc_, result - - _parse = _parseNoCache - - @staticmethod - def reset_cache() -> None: - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len( - ParserElement.packrat_cache_stats - ) - ParserElement.recursion_memos.clear() - - _packratEnabled = False - _left_recursion_enabled = False - - @staticmethod - def disable_memoization() -> None: - """ - Disables active Packrat or Left Recursion parsing and their memoization - - This method also works if neither Packrat nor Left Recursion are enabled. - This makes it safe to call before activating Packrat nor Left Recursion - to clear any previous settings. 
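A minimal usage sketch (illustrative, not from the vendored file)::

    import pyparsing as pp

    pp.ParserElement.enable_packrat()
    # ... run memoized parses ...
    pp.ParserElement.disable_memoization()  # back to plain, un-memoized parsing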
- """ - ParserElement.reset_cache() - ParserElement._left_recursion_enabled = False - ParserElement._packratEnabled = False - ParserElement._parse = ParserElement._parseNoCache - - @staticmethod - def enable_left_recursion( - cache_size_limit: OptionalType[int] = None, *, force=False - ) -> None: - """ - Enables "bounded recursion" parsing, which allows for both direct and indirect - left-recursion. During parsing, left-recursive :class:`Forward` elements are - repeatedly matched with a fixed recursion depth that is gradually increased - until finding the longest match. - - Example:: - - import pyparsing as pp - pp.ParserElement.enable_left_recursion() - - E = pp.Forward("E") - num = pp.Word(pp.nums) - # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... - E <<= E + '+' - num | num - - print(E.parse_string("1+2+3")) - - Recursion search naturally memoizes matches of ``Forward`` elements and may - thus skip reevaluation of parse actions during backtracking. This may break - programs with parse actions which rely on strict ordering of side-effects. - - Parameters: - - - cache_size_limit - (default=``None``) - memoize at most this many - ``Forward`` elements during matching; if ``None`` (the default), - memoize all ``Forward`` elements. - - Bounded Recursion parsing works similar but not identical to Packrat parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. - """ - if force: - ParserElement.disable_memoization() - elif ParserElement._packratEnabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if cache_size_limit is None: - ParserElement.recursion_memos = _UnboundedMemo() - elif cache_size_limit > 0: - ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) - else: - raise NotImplementedError("Memo size of %s" % cache_size_limit) - ParserElement._left_recursion_enabled = True - - @staticmethod - def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: - """ - Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enable_packrat`. - For best results, call ``enable_packrat()`` immediately after - importing pyparsing. - - Example:: - - import pyparsing - pyparsing.ParserElement.enable_packrat() - - Packrat parsing works similar but not identical to Bounded Recursion parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. 
- """ - if force: - ParserElement.disable_memoization() - elif ParserElement._left_recursion_enabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = _UnboundedCache() - else: - ParserElement.packrat_cache = _FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parse_string( - self, instring: str, parse_all: bool = False, *, parseAll: bool = False - ) -> ParseResults: - """ - Parse a string with respect to the parser definition. This function is intended as the primary interface to the - client code. - - :param instring: The input string to be parsed. - :param parse_all: If set, the entire input string must match the grammar. - :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. - :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. - :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or - an object with attributes if the given parser includes results names. - - If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This - is also equivalent to ending the grammar with :class:`StringEnd`(). - - To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are - converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string - contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string - being parsed, one can ensure a consistent view of the input string by doing one of the following: - - - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), - - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the - parse action's ``s`` argument, or - - explicitly expand the tabs in your input string before calling ``parse_string``. - - Examples: - - By default, partial matches are OK. - - >>> res = Word('a').parse_string('aaaaabaaa') - >>> print(res) - ['aaaaa'] - - The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children - directly to see more examples. - - It raises an exception if parse_all flag is set and instring does not match the whole grammar. - - >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) - Traceback (most recent call last): - ... 
- pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) - """ - parseAll = parse_all or parseAll - - ParserElement.reset_cache() - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - raise exc.with_traceback(None) - else: - return tokens - - def scan_string( - self, - instring: str, - max_matches: int = _MAX_INT, - overlap: bool = False, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> Generator[Tuple[ParseResults, int, int], None, None]: - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``max_matches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parse_string` for more information on parsing - strings with embedded tabs. - - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scan_string(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - maxMatches = min(maxMatches, max_matches) - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = str(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn(instring, loc) - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - if debug: - print( - { - "tokens": tokens.asList(), - "start": preloc, - "end": nextLoc, - } - ) - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def transform_string(self, instring: str, *, debug: bool = False) -> str: - """ - Extension to :class:`scan_string`, to modify matching text with modified tokens that may - be returned from a parse action. To use ``transform_string``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transform_string()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transform_string()`` returns the resulting transformed string. 
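The ``overlap`` flag of ``scan_string`` above is easiest to see in a small sketch (illustrative, not from the vendored file)::

    import pyparsing as pp

    pair = pp.Word(pp.alphas, exact=2)
    print([t[0] for t, s, e in pair.scan_string("abcd")])                # -> ['ab', 'cd']
    print([t[0] for t, s, e in pair.scan_string("abcd", overlap=True)])  # -> ['ab', 'bc', 'cd']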
- - Example:: - - wd = Word(alphas) - wd.set_parse_action(lambda toks: toks[0].title()) - - print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transform_string and scan_string - self.keepTabs = True - try: - for t, s, e in self.scan_string(instring, debug=debug): - out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.as_list() - elif isinstance(t, Iterable) and not isinstance(t, str_type): - out += list(t) - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join(map(str, _flatten(out))) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def search_string( - self, - instring: str, - max_matches: int = _MAX_INT, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> ParseResults: - """ - Another extension to :class:`scan_string`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``max_matches`` argument, to clip searching after 'n' matches are found. - - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - maxMatches = min(maxMatches, max_matches) - try: - return ParseResults( - [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] - ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def split( - self, - instring: str, - maxsplit: int = _MAX_INT, - include_separators: bool = False, - *, - includeSeparators=False, - ) -> Generator[str, None, None]: - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``include_separators`` argument (default= ``False``), if the separating - matching text should be included in the split results. - - Example:: - - punc = one_of(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - includeSeparators = includeSeparators or include_separators - last = 0 - for t, s, e in self.scan_string(instring, max_matches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other): - """ - Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. - - Literal('start') + ... + Literal('end') - - is equivalent to: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. - """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return And([self, other]) - - def __radd__(self, other): - """ - Implementation of ``+`` operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other + self - - def __sub__(self, other): - """ - Implementation of ``-`` operator, returns :class:`And` with error stop - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return self + And._ErrorStop() + other - - def __rsub__(self, other): - """ - Implementation of ``-`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other - self - - def __mul__(self, other): - """ - Implementation of ``*`` operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. 
If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0,) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError( - "cannot multiply ParserElement and ({}) objects".format( - ",".join(type(item).__name__ for item in other) - ) - ) - else: - raise TypeError( - "cannot multiply ParserElement and {} objects".format( - type(other).__name__ - ) - ) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError( - "second tuple value must be greater or equal to first tuple value" - ) - if minElements == optElements == 0: - return And([]) - - if optElements: - - def makeOptionalList(n): - if n > 1: - return Opt(self + makeOptionalList(n - 1)) - else: - return Opt(self) - - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other): - """ - Implementation of ``|`` operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return MatchFirst([self, other]) - - def __ror__(self, other): - """ - Implementation of ``|`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other | self - - def __xor__(self, other): - """ - Implementation of ``^`` operator - returns :class:`Or` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Or([self, other]) - - def __rxor__(self, other): - """ - Implementation of ``^`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other ^ self - - def __and__(self, other): - """ - Implementation of ``&`` operator - returns :class:`Each` - """ - if isinstance(other, str_type): - other = 
self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Each([self, other]) - - def __rand__(self, other): - """ - Implementation of ``&`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other & self - - def __invert__(self): - """ - Implementation of ``~`` operator - returns :class:`NotAny` - """ - return NotAny(self) - - # disable __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - __iter__ = None - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str_type): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - raise TypeError( - "only 1 or 2 index arguments supported ({}{})".format( - key[:5], "... [{}]".format(len(key)) if len(key) > 5 else "" - ) - ) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name: str = None): - """ - Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self) -> "ParserElement": - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress(self) - - def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Enables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. - - :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = True - return self - - def leave_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. 
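A small sketch of the effect (illustrative, not from the vendored file)::

    import pyparsing as pp

    word = pp.Word(pp.alphas)
    print((word + word).parse_string("ab cd"))  # whitespace is skipped: -> ['ab', 'cd']

    strict = word + word.copy().leave_whitespace()
    try:
        strict.parse_string("ab cd")
    except pp.ParseException as err:
        print(err)  # the second word no longer skips the blank, so the match fails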
-
-        :param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
-        """
-        self.skipWhitespace = False
-        return self
-
-    def set_whitespace_chars(
-        self, chars: Union[Set[str], str], copy_defaults: bool = False
-    ) -> "ParserElement":
-        """
-        Overrides the default whitespace characters.
-        """
-        self.skipWhitespace = True
-        self.whiteChars = set(chars)
-        self.copyDefaultWhiteChars = copy_defaults
-        return self
-
-    def parse_with_tabs(self) -> "ParserElement":
-        """
-        Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.
-        Must be called before ``parse_string`` when the input grammar contains elements that
-        match ``<TAB>`` characters.
-        """
-        self.keepTabs = True
-        return self
-
-    def ignore(self, other: "ParserElement") -> "ParserElement":
-        """
-        Define expression to be ignored (e.g., comments) while doing pattern
-        matching; may be called repeatedly, to define multiple comment or other
-        ignorable patterns.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-            patt.parse_string('ablaj /* comment */ lskjd')
-            # -> ['ablaj']
-
-            patt.ignore(c_style_comment)
-            patt.parse_string('ablaj /* comment */ lskjd')
-            # -> ['ablaj', 'lskjd']
-        """
-        import typing
-
-        if isinstance(other, str_type):
-            other = Suppress(other)
-
-        if isinstance(other, Suppress):
-            if other not in self.ignoreExprs:
-                self.ignoreExprs.append(other)
-        else:
-            self.ignoreExprs.append(Suppress(other.copy()))
-        return self
-
-    def set_debug_actions(
-        self,
-        start_action: DebugStartAction,
-        success_action: DebugSuccessAction,
-        exception_action: DebugExceptionAction,
-    ) -> "ParserElement":
-        """
-        Customize display of debugging messages while doing pattern matching:
-
-        - ``start_action`` - method to be called when an expression is about to be parsed;
-          should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
-
-        - ``success_action`` - method to be called when an expression has successfully parsed;
-          should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
-
-        - ``exception_action`` - method to be called when expression fails to parse;
-          should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
-        """
-        self.debugActions = (
-            start_action or _default_start_debug_action,
-            success_action or _default_success_debug_action,
-            exception_action or _default_exception_debug_action,
-        )
-        self.debug = True
-        return self
-
-    def set_debug(self, flag=True) -> "ParserElement":
-        """
-        Enable display of debugging messages while doing pattern matching.
-        Set ``flag`` to ``True`` to enable, ``False`` to disable.
-
-        Example::
-
-            wd = Word(alphas).set_name("alphaword")
-            integer = Word(nums).set_name("numword")
-            term = wd | integer
-
-            # turn on debugging for wd
-            wd.set_debug()
-
-            OneOrMore(term).parse_string("abc 123 xyz 890")
-
-        prints::
-
-            Match alphaword at loc 0(1,1)
-            Matched alphaword -> ['abc']
-            Match alphaword at loc 3(1,4)
-            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
-            Match alphaword at loc 7(1,8)
-            Matched alphaword -> ['xyz']
-            Match alphaword at loc 11(1,12)
-            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
-            Match alphaword at loc 15(1,16)
-            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
-        The output shown is that produced by the default debug actions - custom debug actions can be
-        specified using :class:`set_debug_actions`. Prior to attempting
-        to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
-        is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
-        message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
-        which makes debugging and exception messages easier to understand - for instance, the default
-        name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
-        """
-        if flag:
-            self.set_debug_actions(
-                _default_start_debug_action,
-                _default_success_debug_action,
-                _default_exception_debug_action,
-            )
-        else:
-            self.debug = False
-        return self
-
-    @property
-    def default_name(self) -> str:
-        if self._defaultName is None:
-            self._defaultName = self._generateDefaultName()
-        return self._defaultName
-
-    @abstractmethod
-    def _generateDefaultName(self):
-        """
-        Child classes must define this method, which defines how the ``default_name`` is set.
-        """
-
-    def set_name(self, name: str) -> "ParserElement":
-        """
-        Define name for this expression, making debugging and exception messages clearer.
-
-        Example::
-
-            Word(nums).parse_string("ABC")  # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
-            Word(nums).set_name("integer").parse_string("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
-        """
-        self.customName = name
-        self.errmsg = "Expected " + self.name
-        if __diag__.enable_debug_on_named_expressions:
-            self.set_debug()
-        return self
-
-    @property
-    def name(self) -> str:
-        # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
-        return self.customName if self.customName is not None else self.default_name
-
-    def __str__(self) -> str:
-        return self.name
-
-    def __repr__(self) -> str:
-        return str(self)
-
-    def streamline(self) -> "ParserElement":
-        self.streamlined = True
-        self._defaultName = None
-        return self
-
-    def recurse(self):
-        return []
-
-    def _checkRecursion(self, parseElementList):
-        subRecCheckList = parseElementList[:] + [self]
-        for e in self.recurse():
-            e._checkRecursion(subRecCheckList)
-
-    def validate(self, validateTrace=None):
-        """
-        Check defined expressions for valid structure, check for infinite recursive definitions.
-        """
-        self._checkRecursion([])
-
-    def parse_file(
-        self,
-        file_or_filename: Union[str, Path, TextIO],
-        encoding: str = "utf-8",
-        parse_all: bool = False,
-        *,
-        parseAll: bool = False,
-    ) -> ParseResults:
-        """
-        Execute the parse expression on the given file or filename.
-        If a filename is specified (instead of a file object),
-        the entire file is opened, read, and closed before parsing.
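A short sketch of calling ``parse_file`` (the file path here is a hypothetical placeholder)::

    import pyparsing as pp

    greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + "!"
    # "greeting.txt" is a placeholder path; parse_file opens, reads, and closes it
    print(greet.parse_file("greeting.txt", parse_all=True))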
- """ - parseAll = parseAll or parse_all - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r", encoding=encoding) as f: - file_contents = f.read() - try: - return self.parse_string(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, str_type): - return self.matches(other, parse_all=True) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __hash__(self): - return id(self) - - def matches( - self, test_string: str, parse_all: bool = True, *, parseAll: bool = True - ) -> bool: - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - ``test_string`` - to test against this expression for a match - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - parseAll = parseAll and parse_all - try: - self.parse_string(str(test_string), parse_all=parseAll) - return True - except ParseBaseException: - return False - - def run_tests( - self, - tests: Union[str, List[str]], - parse_all: bool = True, - comment: OptionalType[Union["ParserElement", str]] = "#", - full_dump: bool = True, - print_results: bool = True, - failure_tests: bool = False, - post_parse: Callable[[str, ParseResults], str] = None, - file: OptionalType[TextIO] = None, - with_line_numbers: bool = False, - *, - parseAll: bool = True, - fullDump: bool = True, - printResults: bool = True, - failureTests: bool = False, - postParse: Callable[[str, ParseResults], str] = None, - ): - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - ``tests`` - a list of separate test strings, or a multiline string of test strings - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - ``print_results`` - (default= ``True``) prints test output to stdout - - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing - - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - ``file`` - (default= ``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failure_tests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.run_tests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.run_tests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failure_tests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading ``'r'``.) 
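The ``post_parse`` hook is not exercised by the example above; a minimal sketch (names are illustrative)::

    import pyparsing as pp

    integer = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))

    def doubled(test_string, result):
        return "doubled: {}".format(result[0] * 2)

    integer.run_tests('''
        21
        ''', post_parse=doubled)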
- """ - from .testing import pyparsing_test - - parseAll = parseAll and parse_all - fullDump = fullDump and full_dump - printResults = printResults and print_results - failureTests = failureTests or failure_tests - postParse = postParse or post_parse - if isinstance(tests, str_type): - tests = list(map(type(tests).strip, tests.rstrip().splitlines())) - if isinstance(comment, str_type): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - result: Union[ParseResults, Exception] - allResults = [] - comments = [] - success = True - NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) - BOM = "\ufeff" - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append(pyparsing_test.with_line_numbers(t)) - continue - if not t: - continue - out = [ - "\n" + "\n".join(comments) if comments else "", - pyparsing_test.with_line_numbers(t) if with_line_numbers else t, - ] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transform_string(t.lstrip(BOM)) - result = self.parse_string(t, parse_all=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - out.append(pe.explain()) - out.append("FAIL: " + str(pe)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(pe.__traceback__)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(exc.__traceback__)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append( - "{} failed: {}: {}".format( - postParse.__name__, type(e).__name__, e - ) - ) - else: - out.append(result.dump(full=fullDump)) - out.append("") - - if printResults: - print_("\n".join(out)) - - allResults.append((t, result)) - - return success, allResults - - def create_diagram( - self, - output_html: Union[TextIO, Path, str], - vertical: int = 3, - show_results_names: bool = False, - **kwargs, - ) -> None: - """ - Create a railroad diagram for the parser. - - Parameters: - - output_html (str or file-like object) - output target for generated - diagram HTML - - vertical (int) - threshold for formatting multiple alternatives vertically - instead of horizontally (default=3) - - show_results_names - bool flag whether diagram should show annotations for - defined results names - - Additional diagram-formatting keyword arguments can also be included; - see railroad.Diagram class. 
- """ - - try: - from .diagram import to_railroad, railroad_to_html - except ImportError as ie: - raise Exception( - "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" - ) from ie - - self.streamline() - - railroad = to_railroad( - self, - vertical=vertical, - show_results_names=show_results_names, - diagram_kwargs=kwargs, - ) - if isinstance(output_html, (str, Path)): - with open(output_html, "w", encoding="utf-8") as diag_file: - diag_file.write(railroad_to_html(railroad)) - else: - # we were passed a file-like object, just write to it - output_html.write(railroad_to_html(railroad)) - - setDefaultWhitespaceChars = set_default_whitespace_chars - inlineLiteralsUsing = inline_literals_using - setResultsName = set_results_name - setBreak = set_break - setParseAction = set_parse_action - addParseAction = add_parse_action - addCondition = add_condition - setFailAction = set_fail_action - tryParse = try_parse - canParseNext = can_parse_next - resetCache = reset_cache - enableLeftRecursion = enable_left_recursion - enablePackrat = enable_packrat - parseString = parse_string - scanString = scan_string - searchString = search_string - transformString = transform_string - setWhitespaceChars = set_whitespace_chars - parseWithTabs = parse_with_tabs - setDebugActions = set_debug_actions - setDebug = set_debug - defaultName = default_name - setName = set_name - parseFile = parse_file - runTests = run_tests - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr: ParserElement, must_skip: bool = False): - super().__init__() - self.anchor = expr - self.must_skip = must_skip - - def _generateDefaultName(self): - return str(self.anchor + Empty()).replace("Empty", "...") - - def __add__(self, other): - skipper = SkipTo(other).set_name("...")("_skipped*") - if self.must_skip: - - def must_skip(t): - if not t._skipped or t._skipped.as_list() == [""]: - del t[0] - t.pop("_skipped", None) - - def show_skip(t): - if t._skipped.as_list()[-1:] == [""]: - t.pop("_skipped") - t["_skipped"] = "missing <" + repr(self.anchor) + ">" - - return ( - self.anchor + skipper().add_parse_action(must_skip) - | skipper().add_parse_action(show_skip) - ) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.defaultName - - def parseImpl(self, *args): - raise Exception( - "use of `...` expression without following SkipTo target expression" - ) - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - - def __init__(self): - super().__init__(savelist=False) - - def _generateDefaultName(self): - return type(self).__name__ - - -class Empty(Token): - """ - An empty token, will always match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """ - A token that will never match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. 
- - Example:: - - Literal('blah').parse_string('blah') # -> ['blah'] - Literal('blah').parse_string('blahfooblah') # -> ['blah'] - Literal('blah').parse_string('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - super().__init__() - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Literal; use Empty() instead") - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith( - self.match, loc - ): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -ParserElement._literalStringClass = Literal - - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parse_string("start") # -> ['start'] - Keyword("start").parse_string("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
- """ - - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__( - self, - match_string: str = "", - ident_chars: OptionalType[str] = None, - caseless: bool = False, - *, - matchString: str = "", - identChars: OptionalType[str] = None, - ): - super().__init__() - identChars = identChars or ident_chars - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Keyword; use Empty() instead") - self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = match_string.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - errmsg = self.errmsg - errloc = loc - if self.caseless: - if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: - if loc == 0 or instring[loc - 1].upper() not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ", was immediately followed by keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - else: - if ( - instring[loc] == self.firstMatchChar - and self.matchLen == 1 - or instring.startswith(self.match, loc) - ): - if loc == 0 or instring[loc - 1] not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ( - ", keyword was immediately followed by keyword character" - ) - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - raise ParseException(instring, errloc, errmsg, self) - - @staticmethod - def set_default_keyword_chars(chars): - """ - Overrides the default characters used by :class:`Keyword` expressions. - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - - setDefaultKeywordChars = set_default_keyword_chars - - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - match_string = matchString or match_string - super().__init__(match_string.upper()) - # Preserve the defining literal. 
- self.returnString = match_string - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc : loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - - def __init__( - self, - match_string: str = "", - ident_chars: OptionalType[str] = None, - *, - matchString: str = "", - identChars: OptionalType[str] = None, - ): - identChars = identChars or ident_chars - match_string = matchString or match_string - super().__init__(match_string, identChars, caseless=True) - - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. - :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters - - ``max_mismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) - patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - - def __init__( - self, - match_string: str, - max_mismatches: int = None, - *, - maxMismatches: int = 1, - caseless=False, - ): - maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches - super().__init__() - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected {!r} (with up to {} mismatches)".format( - self.match_string, self.maxMismatches - ) - self.caseless = caseless - self.mayIndexError = False - self.mayReturnEmpty = False - - def _generateDefaultName(self): - return "{}:{!r}".format(type(self).__name__, self.match_string) - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate( - zip(instring[loc:maxloc], match_string) - ): - src, mat = s_m - if self.caseless: - src, mat = src.lower(), mat.lower() - - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = start + match_stringloc + 1 - 
-                results = ParseResults([instring[start:loc]])
-                results["original"] = match_string
-                results["mismatches"] = mismatches
-                return loc, results
-
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
-    """Token for matching words composed of allowed character sets.
-
-    Parameters:
-
-    - ``init_chars`` - string of all characters that should be used to
-      match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
-      if ``body_chars`` is also specified, then this is the string of
-      initial characters
-    - ``body_chars`` - string of characters that
-      can be used for matching after a matched initial character as
-      given in ``init_chars``; if omitted, same as the initial characters
-      (default=``None``)
-    - ``min`` - minimum number of characters to match (default=1)
-    - ``max`` - maximum number of characters to match (default=0)
-    - ``exact`` - exact number of characters to match (default=0)
-    - ``as_keyword`` - match as a keyword (default=``False``)
-    - ``exclude_chars`` - characters that might be
-      found in the input ``body_chars`` string but which should not be
-      accepted for matching; useful to define a word of all
-      printables except for one or two characters, for instance
-      (default=``None``)
-
-    :class:`srange` is useful for defining custom character set strings
-    for defining :class:`Word` expressions, using range notation from
-    regular expression character sets.
-
-    A common mistake is to use :class:`Word` to match a specific literal
-    string, as in ``Word("Address")``. Remember that :class:`Word`
-    uses the string argument to define *sets* of matchable characters.
-    This expression would match "Add", "AAA", "dAred", or any other word
-    made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
-    exact literal string, use :class:`Literal` or :class:`Keyword`.
-
-    pyparsing includes helper strings for building Words:
-
-    - :class:`alphas`
-    - :class:`nums`
-    - :class:`alphanums`
-    - :class:`hexnums`
-    - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
-      - accented, tilded, umlauted, etc.)
-    - :class:`punc8bit` (non-alphabetic characters in ASCII range
-      128-255 - currency, symbols, superscripts, diacriticals, etc.)
-    - :class:`printables` (any non-whitespace character)
-
-    ``alphas``, ``nums``, and ``printables`` are also defined in several
-    Unicode sets - see :class:`pyparsing_unicode`.
- - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, exclude_chars=",") - """ - - def __init__( - self, - init_chars: str = "", - body_chars: OptionalType[str] = None, - min: int = 1, - max: int = 0, - exact: int = 0, - as_keyword: bool = False, - exclude_chars: OptionalType[str] = None, - *, - initChars: OptionalType[str] = None, - bodyChars: OptionalType[str] = None, - asKeyword: bool = False, - excludeChars: OptionalType[str] = None, - ): - initChars = initChars or init_chars - bodyChars = bodyChars or body_chars - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__() - if not initChars: - raise ValueError( - "invalid {}, initChars cannot be empty string".format( - type(self).__name__ - ) - ) - - initChars = set(initChars) - self.initChars = initChars - if excludeChars: - excludeChars = set(excludeChars) - initChars -= excludeChars - if bodyChars: - bodyChars = set(bodyChars) - excludeChars - self.initCharsOrig = "".join(sorted(initChars)) - - if bodyChars: - self.bodyCharsOrig = "".join(sorted(bodyChars)) - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = "".join(sorted(initChars)) - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - # see if we can make a regex for this Word - if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): - if self.bodyChars == self.initChars: - if max == 0: - repeat = "+" - elif max == 1: - repeat = "" - else: - repeat = "{{{},{}}}".format( - self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen - ) - self.reString = "[{}]{}".format( - _collapse_string_to_ranges(self.initChars), - repeat, - ) - elif len(self.initChars) == 1: - if max == 0: - repeat = "*" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "{}[{}]{}".format( - re.escape(self.initCharsOrig), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - else: - if max == 0: - repeat = "*" - elif max == 2: - repeat = "" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "[{}][{}]{}".format( - _collapse_string_to_ranges(self.initChars), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except sre_constants.error: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def _generateDefaultName(self): - def charsAsStr(s): - max_repr_len = 16 - s = _collapse_string_to_ranges(s, re_escape=False) - if len(s) > max_repr_len: - return s[: max_repr_len - 3] + "..." 
- else: - return s - - if self.initChars != self.bodyChars: - base = "W:({}, {})".format( - charsAsStr(self.initChars), charsAsStr(self.bodyChars) - ) - else: - base = "W:({})".format(charsAsStr(self.initChars)) - - # add length specification - if self.minLen > 1 or self.maxLen != _MAX_INT: - if self.minLen == self.maxLen: - if self.minLen == 1: - return base[2:] - else: - return base + "{{{}}}".format(self.minLen) - elif self.maxLen == _MAX_INT: - return base + "{{{},...}}".format(self.minLen) - else: - return base + "{{{},{}}}".format(self.minLen, self.maxLen) - return base - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if ( - start > 0 - and instring[start - 1] in bodychars - or loc < instrlen - and instring[loc] in bodychars - ): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _WordRegex(Word): - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - -class Char(_WordRegex): - """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - - def __init__( - self, - charset: str, - as_keyword: bool = False, - exclude_chars: OptionalType[str] = None, - *, - asKeyword: bool = False, - excludeChars: OptionalType[str] = None, - ): - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__( - charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars - ) - self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars)) - if asKeyword: - self.reString = r"\b{}\b".format(self.reString) - self.re = re.compile(self.reString) - self.re_match = self.re.match - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module `_. - If the given regex contains named groups (defined using ``(?P...)``), - these will be preserved as named :class:`ParseResults`. - - If instead of the Python stdlib ``re`` module you wish to use a different RE module - (such as the ``regex`` module), you can do so by building your ``Regex`` object with - a compiled RE that was compiled using ``regex``. 
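# --- editor's note: the group syntax ``(?P...)`` in the docstring above has
# apparently lost its angle-bracketed group name to markup stripping; it
# should read ``(?P<name>...)``. A hedged sketch of the documented
# named-group behavior, assuming pyparsing 3.x:
import pyparsing as pp

date = pp.Regex(r"(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)")
res = date.parse_string("2021-11-19")
print(res["year"], res["month"], res["day"])  # -> 2021 11 19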
- - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - - # named fields in a regex will be returned as named results - date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') - - # the Regex class will accept re's compiled using the regex module - import regex - parser = pp.Regex(regex.compile(r'[0-9]')) - """ - - def __init__( - self, - pattern: Any, - flags: Union[re.RegexFlag, int] = 0, - as_group_list: bool = False, - as_match: bool = False, - *, - asGroupList: bool = False, - asMatch: bool = False, - ): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module `_ module for an - explanation of the acceptable patterns and flags. - """ - super().__init__() - asGroupList = asGroupList or as_group_list - asMatch = asMatch or as_match - - if isinstance(pattern, str_type): - if not pattern: - raise ValueError("null string passed to Regex; use Empty() instead") - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - raise ValueError( - "invalid pattern ({!r}) passed to Regex".format(pattern) - ) - - elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): - self.re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError( - "Regex may only be constructed with a string or a compiled RE object" - ) - - self.re_match = self.re.match - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = self.re_match("") is not None - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - def _generateDefaultName(self): - return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def sub(self, repl): - r""" - Return :class:`Regex` with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) `_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") - print(make_html.transform_string("h1:main title:")) - # prints "

<h1>main title</h1>

" - """ - if self.asGroupList: - raise TypeError("cannot use sub() with Regex(asGroupList=True)") - - if self.asMatch and callable(repl): - raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") - - if self.asMatch: - - def pa(tokens): - return tokens[0].expand(repl) - - else: - - def pa(tokens): - return self.re.sub(repl, tokens[0]) - - return self.add_parse_action(pa) - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - ``quote_char`` - string of one or more characters defining the - quote delimiting string - - ``esc_char`` - character to re_escape quotes, typically backslash - (default= ``None``) - - ``esc_quote`` - special quote sequence to re_escape an embedded quote - string (such as SQL's ``""`` to re_escape an embedded ``"``) - (default= ``None``) - - ``multiline`` - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - ``unquote_results`` - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - ``end_quote_char`` - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quote_char) - - ``convert_whitespace_escapes`` - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.search_string('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', end_quote_char='}}') - print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', esc_quote='""') - print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) - - def __init__( - self, - quote_char: str = "", - esc_char: OptionalType[str] = None, - esc_quote: OptionalType[str] = None, - multiline: bool = False, - unquote_results: bool = True, - end_quote_char: OptionalType[str] = None, - convert_whitespace_escapes: bool = True, - *, - quoteChar: str = "", - escChar: OptionalType[str] = None, - escQuote: OptionalType[str] = None, - unquoteResults: bool = True, - endQuoteChar: OptionalType[str] = None, - convertWhitespaceEscapes: bool = True, - ): - super().__init__() - escChar = escChar or esc_char - escQuote = escQuote or esc_quote - unquoteResults = unquoteResults and unquote_results - endQuoteChar = endQuoteChar or end_quote_char - convertWhitespaceEscapes = ( - convertWhitespaceEscapes and convert_whitespace_escapes - ) - quote_char = quoteChar or quote_char - - # remove white space from quote chars - wont work anyway - quote_char = quote_char.strip() - if not quote_char: - raise ValueError("quote_char cannot be the empty string") - - if endQuoteChar is None: - endQuoteChar = quote_char - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - raise ValueError("endQuoteChar cannot be the empty string") - - self.quoteChar = quote_char - self.quoteCharLen = len(quote_char) - self.firstQuoteChar = quote_char[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - sep = "" - inner_pattern = "" - - if escQuote: - inner_pattern += 
r"{}(?:{})".format(sep, re.escape(escQuote)) - sep = "|" - - if escChar: - inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) - sep = "|" - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - - if len(self.endQuoteChar) > 1: - inner_pattern += ( - "{}(?:".format(sep) - + "|".join( - "(?:{}(?!{}))".format( - re.escape(self.endQuoteChar[:i]), - _escape_regex_range_chars(self.endQuoteChar[i:]), - ) - for i in range(len(self.endQuoteChar) - 1, 0, -1) - ) - + ")" - ) - sep = "|" - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - inner_pattern += r"{}(?:[^{}{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - else: - self.flags = 0 - inner_pattern += r"{}(?:[^{}\n\r{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - - self.pattern = "".join( - [ - re.escape(self.quoteChar), - "(?:", - inner_pattern, - ")*", - re.escape(self.endQuoteChar), - ] - ) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except sre_constants.error: - raise ValueError( - "invalid pattern {!r} passed to Regex".format(self.pattern) - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def _generateDefaultName(self): - if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): - return "string enclosed in {!r}".format(self.quoteChar) - - return "quoted string, starting with {} ending with {}".format( - self.quoteChar, self.endQuoteChar - ) - - def parseImpl(self, instring, loc, doActions=True): - result = ( - instring[loc] == self.firstQuoteChar - and self.re_match(instring, loc) - or None - ) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen : -self.endQuoteCharLen] - - if isinstance(ret, str_type): - # replace escaped whitespace - if "\\" in ret and self.convertWhitespaceEscapes: - for wslit, wschar in self.ws_map: - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. 
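# --- editor's sketch (not part of the patch): QuotedString with an embedded
# escaped quote, per the class docstring above. Assumes pyparsing 3.x.
import pyparsing as pp

sql_qs = pp.QuotedString('"', esc_quote='""')
print(sql_qs.parse_string('"a ""quoted"" word"')[0])
# -> a "quoted" word   (delimiters stripped and '""' unescaped, because
#    unquote_results defaults to True)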
- - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - - def __init__( - self, - not_chars: str = "", - min: int = 1, - max: int = 0, - exact: int = 0, - *, - notChars: str = "", - ): - super().__init__() - self.skipWhitespace = False - self.notChars = not_chars or notChars - self.notCharsSet = set(self.notChars) - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " - "Opt(CharsNotIn()) if zero-length char group is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = self.minLen == 0 - self.mayIndexError = False - - def _generateDefaultName(self): - not_chars_str = _collapse_string_to_ranges(self.notChars) - if len(not_chars_str) > 16: - return "!W:({}...)".format(self.notChars[: 16 - 3]) - else: - return "!W:({})".format(self.notChars) - - def parseImpl(self, instring, loc, doActions=True): - notchars = self.notCharsSet - if instring[loc] in notchars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
- """ - - whiteStrs = { - " ": "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", - "\u00A0": "", - "\u1680": "", - "\u180E": "", - "\u2000": "", - "\u2001": "", - "\u2002": "", - "\u2003": "", - "\u2004": "", - "\u2005": "", - "\u2006": "", - "\u2007": "", - "\u2008": "", - "\u2009": "", - "\u200A": "", - "\u200B": "", - "\u202F": "", - "\u205F": "", - "\u3000": "", - } - - def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): - super().__init__() - self.matchWhite = ws - self.set_whitespace_chars( - "".join(c for c in self.whiteChars if c not in self.matchWhite), - copy_defaults=True, - ) - # self.leave_whitespace() - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def _generateDefaultName(self): - return "".join(White.whiteStrs[c] for c in self.matchWhite) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class PositionToken(Token): - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class GoToColumn(PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - - def __init__(self, colno: int): - super().__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while ( - loc < instrlen - and instring[loc].isspace() - and col(loc, instring) != self.col - ): - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc:newloc] - return newloc, ret - - -class LineStart(PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self): - super().__init__() - self.leave_whitespace() - self.orig_whiteChars = set() | self.whiteChars - self.whiteChars.discard("\n") - self.skipper = Empty().set_whitespace_chars(self.whiteChars) - self.errmsg = "Expected start of line" - - def preParse(self, instring, loc): - if loc == 0: - return loc - else: - ret = self.skipper.preParse(instring, loc) - if "\n" in self.orig_whiteChars: - while instring[ret : ret + 1] == "\n": - ret = self.skipper.preParse(instring, ret + 1) - return ret - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - - -class LineEnd(PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - 
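# --- editor's sketch (not part of the patch): White makes otherwise-skipped
# whitespace significant, as described above; here the two words must be
# separated by a tab, and a plain space fails to parse. Assumes pyparsing 3.x.
import pyparsing as pp

tab_sep = pp.Word(pp.alphas) + pp.White("\t").suppress() + pp.Word(pp.alphas)
print(tab_sep.parse_string("abc\tdef"))  # -> ['abc', 'def']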
- def __init__(self): - super().__init__() - self.whiteChars.discard("\n") - self.set_whitespace_chars(self.whiteChars, copy_defaults=False) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class StringStart(PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class StringEnd(PositionToken): - """ - Matches if current position is at the end of the parse string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class WordStart(PositionToken): - """Matches if the current position is at the beginning of a - :class:`Word`, and is not preceded by any character in a given - set of ``word_chars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars != printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if ( - instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class WordEnd(PositionToken): - """Matches if the current position is at the end of a :class:`Word`, - and is not followed by any character in a given set of ``word_chars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars != printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if ( - instring[loc] in self.wordChars - or instring[loc - 1] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
- """ - - def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): - super().__init__(savelist) - self.exprs: List[ParserElement] - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, str_type): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, str_type) for expr in exprs): - exprs = ( - self._literalStringClass(e) if isinstance(e, str_type) else e - for e in exprs - ) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def recurse(self): - return self.exprs[:] - - def append(self, other): - self.exprs.append(other) - self._defaultName = None - return self - - def leave_whitespace(self, recursive=True): - """ - Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().leave_whitespace(recursive) - - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive=True): - """ - Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().ignore_whitespace(recursive) - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.ignore_whitespace(recursive) - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def _generateDefaultName(self): - return "{}:({})".format(self.__class__.__name__, str(self.exprs)) - - def streamline(self): - if self.streamlined: - return self - - super().streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) - if len(self.exprs) == 2: - other = self.exprs[0] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = other.exprs[:] + [self.exprs[1]] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + str(self) - - return self - - def validate(self, validateTrace=None): - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self._checkRecursion([]) - - def copy(self): - ret = super().copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and 
Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in self.exprs: - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.leave_whitespace() - - def _generateDefaultName(self): - return "-" - - def __init__(self, exprs_arg: IterableType[ParserElement], savelist: bool = True): - exprs: List[ParserElement] = list(exprs_arg) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception( - "cannot construct And with sequence ending in ..." 
- ) - else: - tmp.append(expr) - exprs[:] = tmp - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.set_whitespace_chars( - self.exprs[0].whiteChars, - copy_defaults=self.exprs[0].copyDefaultWhiteChars, - ) - self.skipWhitespace = self.exprs[0].skipWhitespace - else: - self.mayReturnEmpty = True - self.callPreparse = True - - def streamline(self) -> ParserElement: - # collapse any _PendingSkip's - if self.exprs: - if any( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1] - ): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if ( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - ): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super().streamline() - - # link any IndentedBlocks to the prior expression - for prev, cur in zip(self.exprs, self.exprs[1:]): - # traverse cur or any first embedded expr of cur looking for an IndentedBlock - # (but watch out for recursive grammar) - seen = set() - while cur: - if id(cur) in seen: - break - seen.add(id(cur)) - if isinstance(cur, IndentedBlock): - prev.add_parse_action( - lambda s, l, t: setattr(cur, "parent_anchor", col(l, s)) - ) - break - subs = cur.recurse() - cur = next(iter(subs), None) - - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as callPreParse arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( - instring, loc, doActions, callPreParse=False - ) - errorStop = False - for e in self.exprs[1:]: - # if isinstance(e, And._ErrorStop): - if type(e) is And._ErrorStop: - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException( - instring, len(instring), self.errmsg, self - ) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e._checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def _generateDefaultName(self): - inner = " ".join(str(e) for e in self.exprs) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "{" + inner + "}" - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.search_string("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.saveAsList = False - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - fatals = [] - if all(e.callPreparse for e in self.exprs): - loc = self.preParse(instring, loc) - for e in self.exprs: - try: - loc2 = e.try_parse(instring, loc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - maxException = None - maxExcLoc = -1 - except ParseException as err: - if not fatals: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. - matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ixor__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def _generateDefaultName(self): - return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in 
e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose" - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - more than one expression matches, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums) - print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - if self.exprs: - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.saveAsList = False - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - - for e in self.exprs: - try: - return e._parse( - instring, - loc, - doActions, - ) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - raise - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ior__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def _generateDefaultName(self): - return "{" + " | ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose" - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return 
super()._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. - - Example:: - - color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) - - shape_spec.run_tests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - - def __init__(self, exprs: IterableType[ParserElement], savelist: bool = True): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict( - (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) - ) - opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] - opt2 = [ - e - for e in self.exprs - if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) - ] - self.optionals = opt1 + opt2 - self.multioptionals = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, _MultipleMatch) - ] - self.multirequired = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, OneOrMore) - ] - self.required = [ - e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) - ] - self.required += self.multirequired - self.initExprGroups = False - - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - multis = self.multioptionals[:] - matchOrder = [] - - keepMatching = True - failed = [] - fatals = [] - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + multis - failed.clear() - fatals.clear() - for e in tmpExprs: - try: - tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - 
failed.append(e) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - # look for any ParseFatalExceptions - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if tmpReqd: - missing = ", ".join(str(e) for e in tmpReqd) - raise ParseException( - instring, - loc, - "Missing one or more required elements ({})".format(missing), - ) - - # add any unmatched Opts, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] - - total_results = ParseResults([]) - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - total_results += results - - return loc, total_results - - def _generateDefaultName(self): - return "{" + " & ".join(str(e) for e in self.exprs) + "}" - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - super().__init__(savelist) - if isinstance(expr, str_type): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - elif issubclass(type(self), self._literalStringClass): - expr = Literal(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.set_whitespace_chars( - expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars - ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def recurse(self): - return [self.expr] if self.expr is not None else [] - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException("", loc, self.errmsg, self) - - def leave_whitespace(self, recursive=True): - super().leave_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive=True): - super().ignore_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.ignore_whitespace(recursive) - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self): - super().streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def _checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - 
if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self): - return "{}:({})".format(self.__class__.__name__, str(self.expr)) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class IndentedBlock(ParseElementEnhance): - """ - Expression to match one or more expressions at a given indentation level. - Useful for parsing text where structure is implied by indentation (like Python source code). - """ - - class _Indent(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) == ref_col) - - class _IndentGreater(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column greater than {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) > ref_col) - - def __init__( - self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True - ): - super().__init__(expr, savelist=True) - # if recursive: - # raise NotImplementedError("IndentedBlock with recursive is not implemented") - self._recursive = recursive - self._grouped = grouped - self.parent_anchor = 1 - - def parseImpl(self, instring, loc, doActions=True): - # advance parse position to non-whitespace by using an Empty() - # this should be the column to be used for all subsequent indented lines - anchor_loc = Empty().preParse(instring, loc) - - # see if self.expr matches at the current location - if not it will raise an exception - # and no further work is necessary - self.expr.try_parse(instring, anchor_loc, doActions) - - indent_col = col(anchor_loc, instring) - peer_detect_expr = self._Indent(indent_col) - - inner_expr = Empty() + peer_detect_expr + self.expr - if self._recursive: - sub_indent = self._IndentGreater(indent_col) - nested_block = IndentedBlock( - self.expr, recursive=self._recursive, grouped=self._grouped - ) - nested_block.set_debug(self.debug) - nested_block.parent_anchor = indent_col - inner_expr += Opt(sub_indent + nested_block) - - inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") - block = OneOrMore(inner_expr) - - trailing_undent = self._Indent(self.parent_anchor) | StringEnd() - - if self._grouped: - wrapper = Group - else: - wrapper = lambda expr: expr - return (wrapper(block) + Optional(trailing_undent)).parseImpl( - instring, anchor_loc, doActions - ) - - -class AtStringStart(ParseElementEnhance): - """Matches if expression matches at the beginning of the parse - string:: - - AtStringStart(Word(nums)).parse_string("123") - # prints ["123"] - - AtStringStart(Word(nums)).parse_string(" 123") - # raises ParseException - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - raise ParseException(instring, loc, "not found at string start") - return super().parseImpl(instring, loc, doActions) - - -class AtLineStart(ParseElementEnhance): - r"""Matches if an expression matches at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (AtLineStart('AAA') + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - 
self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) != 1: - raise ParseException(instring, loc, "not found at line start") - return super().parseImpl(instring, loc, doActions) - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. - - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - OneOrMore(attr_expr).parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, :class:`Literal`, - :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` - with a specified exact or maximum length, then the retreat - parameter is not required. Otherwise, retreat must be specified to - give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
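# --- editor's sketch (not part of the patch): FollowedBy (defined earlier)
# and PrecededBy (described above) are pure lookahead/lookbehind; both match
# without consuming input. Assumes pyparsing 3.x.
import pyparsing as pp

label = pp.Word(pp.alphas) + pp.FollowedBy(":")  # lookahead: ':' not consumed
num = pp.PrecededBy("#") + pp.Word(pp.nums)      # lookbehind: '#' not consumed
print(label.parse_string("shape: SQUARE"))  # -> ['shape']
print(num.search_string("a #123 b"))        # -> [['123']]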
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - - def __init__( - self, expr: Union[ParserElement, str], retreat: OptionalType[int] = None - ): - super().__init__(expr) - self.expr = self.expr().leave_whitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str_type): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat) : loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1) + 1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse( - instring_slice, len(instring_slice) - offset - ) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class Located(ParseElementEnhance): - """ - Decorates a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parse_with_tabs` - - Example:: - - wd = Word(alphas) - for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [0, ['ljsdf'], 5] - [8, ['lksdjjf'], 15] - [18, ['lkkjj'], 23] - - """ - - def parseImpl(self, instring, loc, doActions=True): - start = loc - loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False) - ret_tokens = ParseResults([start, tokens, loc]) - ret_tokens["locn_start"] = start - ret_tokens["value"] = tokens - ret_tokens["locn_end"] = loc - if self.resultsName: - # must return as a list, so that the name will be attached to the complete group - return loc, [ret_tokens] - else: - return loc, ret_tokens - - -class NotAny(ParseElementEnhance): - """ - Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the ``'~'`` operator. 
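# --- editor's sketch (not part of the patch): Located, defined above,
# attaches locn_start / value / locn_end named results to each match.
# Assumes pyparsing 3.x.
import pyparsing as pp

wd = pp.Located(pp.Word(pp.alphas))
for match in wd.search_string("ljsdf123lksdjjf"):
    print(match.locn_start, list(match.value), match.locn_end)
# -> 0 ['ljsdf'] 5
# -> 8 ['lksdjjf'] 15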
- - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Opt(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infix_notation - boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) - - # integers that are followed by "." are actually floats - integer = Word(nums) + ~Char(".") - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - # do NOT use self.leave_whitespace(), don't want to propagate to exprs - # self.leave_whitespace() - self.skipWhitespace = False - - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.can_parse_next(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def _generateDefaultName(self): - return "~{" + str(self.expr) + "}" - - -class _MultipleMatch(ParseElementEnhance): - def __init__( - self, - expr: ParserElement, - stop_on: OptionalType[Union[ParserElement, str]] = None, - *, - stopOn: OptionalType[Union[ParserElement, str]] = None, - ): - super().__init__(expr) - stopOn = stopOn or stop_on - self.saveAsList = True - ender = stopOn - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender): - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions) - try: - hasIgnoreExprs = not not self.ignoreExprs - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in [self.expr] + self.expr.recurse(): - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. 
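The ``stop_on`` sentinel handling implemented in ``_MultipleMatch`` above can be exercised with a small sketch (pyparsing 3.x assumed, imported as ``pp``)::

    import pyparsing as pp

    data_word = pp.Word(pp.alphas)
    label = data_word + pp.FollowedBy(":")

    # Without stop_on, OneOrMore(data_word) would also swallow the next
    # label; the sentinel stops repetition where a new label begins.
    attr_value = pp.OneOrMore(data_word, stop_on=label).set_parse_action(" ".join)
    attr_expr = pp.Group(label + pp.Suppress(":") + attr_value)

    text = "shape: SQUARE posn: upper left color: BLACK"
    print(pp.OneOrMore(attr_expr).parse_string(text))
    # -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]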
- - Parameters: - - expr - expression that must match one or more times - - stop_on - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - OneOrMore(attr_expr).parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stop_on attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parse_string(text).pprint() - """ - - def _generateDefaultName(self): - return "{" + str(self.expr) + "}..." - - -class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``stop_on`` - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - (default= ``None``) - - Example: similar to :class:`OneOrMore` - """ - - def __init__( - self, - expr: ParserElement, - stop_on: OptionalType[Union[ParserElement, str]] = None, - *, - stopOn: OptionalType[Union[ParserElement, str]] = None, - ): - super().__init__(expr, stopOn=stopOn or stop_on) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super().parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, ParseResults([], name=self.resultsName) - - def _generateDefaultName(self): - return "[" + str(self.expr) + "]..." - - -class _NullToken: - def __bool__(self): - return False - - def __str__(self): - return "" - - -class Opt(ParseElementEnhance): - """ - Optional matching of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``default`` (optional) - value to be returned if the optional expression is not found. 
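A small sketch of ``Opt`` with a ``default`` value (pyparsing 3.x assumed; the port-number grammar is illustrative)::

    import pyparsing as pp

    # When the optional suffix is absent, the default value is returned.
    port = pp.Opt(pp.Suppress(":") + pp.Word(pp.nums), default="80")
    host_port = pp.Word(pp.alphanums + ".") + port

    print(host_port.parse_string("example.com:8080"))  # -> ['example.com', '8080']
    print(host_port.parse_string("example.com"))       # -> ['example.com', '80']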
- - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) - zip.run_tests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - - __optionalNotMatched = _NullToken() - - def __init__( - self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched - ): - super().__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - self_expr = self.expr - try: - loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - default_value = self.defaultValue - if default_value is not self.__optionalNotMatched: - if self_expr.resultsName: - tokens = ParseResults([default_value]) - tokens[self_expr.resultsName] = default_value - else: - tokens = [default_value] - else: - tokens = [] - return loc, tokens - - def _generateDefaultName(self): - inner = str(self.expr) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "[" + inner + "]" - - -Optional = Opt - - -class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched - expression is found. - - Parameters: - - ``expr`` - target expression marking the end of the data to be skipped - - ``include`` - if ``True``, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element - list) (default= ``False``). 
- - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, - the :class:`SkipTo` is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quoted_string) - string_data.set_parse_action(token_map(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.search_string(report): - print tkt.dump() - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: 6 - - desc: Intermittent system crash - - issue_num: 101 - - sev: Critical - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: 14 - - desc: Spelling error on Login ('log|n') - - issue_num: 94 - - sev: Cosmetic - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: 47 - - desc: System slow when running too many reports - - issue_num: 79 - - sev: Minor - """ - - def __init__( - self, - other: Union[ParserElement, str], - include: bool = False, - ignore: bool = None, - fail_on: OptionalType[Union[ParserElement, str]] = None, - *, - failOn: Union[ParserElement, str] = None, - ): - super().__init__(other) - failOn = failOn or fail_on - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, str_type): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - self_expr_parse = self.expr._parse - self_failOn_canParseNext = ( - self.failOn.canParseNext if self.failOn is not None else None - ) - self_ignoreExpr_tryParse = ( - self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - ) - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return values - loc = tmploc - skiptext = 
instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - - -class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the ``'<<'`` operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: - - fwd_expr << a | b | c - - will actually be evaluated as:: - - (fwd_expr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwd_expr << (a | b | c) - - Converting to use the ``'<<='`` operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - - def __init__(self, other: OptionalType[Union[ParserElement, str]] = None): - self.caller_frame = traceback.extract_stack(limit=2)[0] - super().__init__(other, savelist=False) - self.lshift_line = None - - def __lshift__(self, other): - if hasattr(self, "caller_frame"): - del self.caller_frame - if isinstance(other, str_type): - other = self._literalStringClass(other) - self.expr = other - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.set_whitespace_chars( - self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars - ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - self.lshift_line = traceback.extract_stack(limit=2)[-2] - return self - - def __ilshift__(self, other): - return self << other - - def __or__(self, other): - caller_line = traceback.extract_stack(limit=2)[-2] - if ( - __diag__.warn_on_match_first_with_lshift_operator - and caller_line == self.lshift_line - and Diagnostics.warn_on_match_first_with_lshift_operator - not in self.suppress_warnings_ - ): - warnings.warn( - "using '<<' operator with '|' is probably an error, use '<<='", - stacklevel=2, - ) - ret = super().__or__(other) - return ret - - def __del__(self): - # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' - if ( - self.expr is None - and __diag__.warn_on_assignment_to_Forward - and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ - ): - warnings.warn_explicit( - "Forward defined here but no expression attached later using '<<=' or '<<'", - UserWarning, - filename=self.caller_frame.filename, - lineno=self.caller_frame.lineno, - ) - - def parseImpl(self, instring, loc, doActions=True): - if ( - self.expr is None - and __diag__.warn_on_parse_using_empty_Forward - and Diagnostics.warn_on_parse_using_empty_Forward - not in self.suppress_warnings_ - ): - # walk stack until parse_string, scan_string, search_string, or transform_string is found - parse_fns = [ - "parse_string", - "scan_string", - "search_string", - "transform_string", - ] - tb = traceback.extract_stack(limit=200) - for i, frm in enumerate(reversed(tb), start=1): - if frm.name in parse_fns: - stacklevel = i + 1 - break - else: - stacklevel = 2 - warnings.warn( - "Forward expression was never assigned a value, will not parse any input", - 
stacklevel=stacklevel, - ) - if not ParserElement._left_recursion_enabled: - return super().parseImpl(instring, loc, doActions) - # ## Bounded Recursion algorithm ## - # Recursion only needs to be processed at ``Forward`` elements, since they are - # the only ones that can actually refer to themselves. The general idea is - # to handle recursion stepwise: We start at no recursion, then recurse once, - # recurse twice, ..., until more recursion offers no benefit (we hit the bound). - # - # The "trick" here is that each ``Forward`` gets evaluated in two contexts - # - to *match* a specific recursion level, and - # - to *search* the bounded recursion level - # and the two run concurrently. The *search* must *match* each recursion level - # to find the best possible match. This is handled by a memo table, which - # provides the previous match to the next level match attempt. - # - # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. - # - # There is a complication since we not only *parse* but also *transform* via - # actions: We do not want to run the actions too often while expanding. Thus, - # we expand using `doActions=False` and only run `doActions=True` if the next - # recursion level is acceptable. - with ParserElement.recursion_lock: - memo = ParserElement.recursion_memos - try: - # we are parsing at a specific recursion expansion - use it as-is - prev_loc, prev_result = memo[loc, self, doActions] - if isinstance(prev_result, Exception): - raise prev_result - return prev_loc, prev_result.copy() - except KeyError: - act_key = (loc, self, True) - peek_key = (loc, self, False) - # we are searching for the best recursion expansion - keep on improving - # both `doActions` cases must be tracked separately here! - prev_loc, prev_peek = memo[peek_key] = ( - loc - 1, - ParseException( - instring, loc, "Forward recursion without base case", self - ), - ) - if doActions: - memo[act_key] = memo[peek_key] - while True: - try: - new_loc, new_peek = super().parseImpl(instring, loc, False) - except ParseException: - # we failed before getting any match – do not hide the error - if isinstance(prev_peek, Exception): - raise - new_loc, new_peek = prev_loc, prev_peek - # the match did not get better: we are done - if new_loc <= prev_loc: - if doActions: - # replace the match for doActions=False as well, - # in case the action did backtrack - prev_loc, prev_result = memo[peek_key] = memo[act_key] - del memo[peek_key], memo[act_key] - return prev_loc, prev_result.copy() - del memo[peek_key] - return prev_loc, prev_peek.copy() - # the match did get better: see if we can improve further - else: - if doActions: - try: - memo[act_key] = super().parseImpl(instring, loc, True) - except ParseException as e: - memo[peek_key] = memo[act_key] = (new_loc, e) - raise - prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek - - def leave_whitespace(self, recursive=True): - self.skipWhitespace = False - return self - - def ignore_whitespace(self, recursive=True): - self.skipWhitespace = True - return self - - def streamline(self): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self): - # Avoid infinite recursion by setting a temporary _defaultName - 
self._defaultName = ": ..." - - # Use the string representation of main expression. - retString = "..." - try: - if self.expr is not None: - retString = str(self.expr)[:1000] - else: - retString = "None" - finally: - return self.__class__.__name__ + ": " + retString - - def copy(self): - if self.expr is not None: - return super().copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, list_all_matches=False): - if ( - __diag__.warn_name_set_on_empty_Forward - and Diagnostics.warn_name_set_on_empty_Forward - not in self.suppress_warnings_ - ): - if self.expr is None: - warnings.warn( - "{}: setting results name {!r} on {} expression " - "that has no contained expression".format( - "warn_name_set_on_empty_Forward", name, type(self).__name__ - ), - stacklevel=3, - ) - - return super()._setResultsName(name, list_all_matches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist=False): - super().__init__(expr) # , savelist) - self.saveAsList = False - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parse_string('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) - """ - - def __init__( - self, - expr: ParserElement, - join_string: str = "", - adjacent: bool = True, - *, - joinString: OptionalType[str] = None, - ): - super().__init__(expr) - joinString = joinString if joinString is not None else join_string - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leave_whitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super().ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults( - ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults - ) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - The optional ``aslist`` argument when set to True will return the - parsed tokens as a Python list instead of a pyparsing ParseResults. 
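The ``aslist`` behaviour mentioned above, as a minimal sketch (pyparsing 3.x assumed, imported as ``pp``)::

    import pyparsing as pp

    term = pp.Word(pp.alphas) | pp.Word(pp.nums)
    # aslist=True makes the grouped result a plain Python list rather
    # than a nested ParseResults.
    args = pp.Group(pp.Opt(pp.delimited_list(term)), aslist=True)
    func = pp.Word(pp.alphas) + args

    result = func.parse_string("fn a, b, 100")
    print(result[1], type(result[1]))  # -> ['a', 'b', '100'] <class 'list'>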
- - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Opt(delimited_list(term)) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Opt(delimited_list(term))) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', ['a', 'b', '100']] - """ - - def __init__(self, expr: ParserElement, aslist: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonList = aslist - - def postParse(self, instring, loc, tokenlist): - if self._asPythonList: - return ParseResults.List( - tokenlist.asList() - if isinstance(tokenlist, ParseResults) - else list(tokenlist) - ) - else: - return [tokenlist] - - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. - - The optional ``asdict`` argument when set to True will return the - parsed tokens as a Python dict instead of a pyparsing ParseResults. - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - # print attributes as plain groups - print(OneOrMore(attr_expr).parse_string(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names - result = Dict(OneOrMore(Group(attr_expr))).parse_string(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.as_dict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See more examples at :class:`ParseResults` of accessing fields by results name. - """ - - def __init__(self, expr: ParserElement, asdict: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonDict = asdict - - def postParse(self, instring, loc, tokenlist): - for i, tok in enumerate(tokenlist): - if len(tok) == 0: - continue - - ikey = tok[0] - if isinstance(ikey, int): - ikey = str(ikey).strip() - - if len(tok) == 1: - tokenlist[ikey] = _ParseResultsWithOffset("", i) - - elif len(tok) == 2 and not isinstance(tok[1], ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) - - else: - try: - dictvalue = tok.copy() # ParseResults(i) - except Exception: - exc = TypeError( - "could not extract dict values from parsed results" - " - Dict expression must contain Grouped expressions" - ) - raise exc from None - - del dictvalue[0] - - if len(dictvalue) != 1 or ( - isinstance(dictvalue, ParseResults) and dictvalue.haskeys() - ): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) - - if self._asPythonDict: - return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() - else: - return [tokenlist] if self.resultsName else tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression. 
-
-    Example::
-
-        source = "a, b, c,d"
-        wd = Word(alphas)
-        wd_list1 = wd + ZeroOrMore(',' + wd)
-        print(wd_list1.parse_string(source))
-
-        # often, delimiters that are useful during parsing are just in the
-        # way afterward - use Suppress to keep them out of the parsed output
-        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
-        print(wd_list2.parse_string(source))
-
-        # Skipped text (using '...') can be suppressed as well
-        source = "lead in START relevant text END trailing text"
-        start_marker = Keyword("START")
-        end_marker = Keyword("END")
-        find_body = Suppress(...) + start_marker + ... + end_marker
-        print(find_body.parse_string(source))
-
-    prints::
-
-        ['a', ',', 'b', ',', 'c', ',', 'd']
-        ['a', 'b', 'c', 'd']
-        ['START', 'relevant text ', 'END']
-
-    (See also :class:`delimited_list`.)
-    """
-
-    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
-        if expr is ...:
-            expr = _PendingSkip(NoMatch())
-        super().__init__(expr)
-
-    def __add__(self, other):
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) + other
-        else:
-            return super().__add__(other)
-
-    def __sub__(self, other):
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) - other
-        else:
-            return super().__sub__(other)
-
-    def postParse(self, instring, loc, tokenlist):
-        return []
-
-    def suppress(self):
-        return self
-
-
-def trace_parse_action(f: ParseAction):
-    """Decorator for debugging parse actions.
-
-    When the parse action is called, this decorator will print
-    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
-    When the parse action completes, the decorator will print
-    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
-
-    Example::
-
-        wd = Word(alphas)
-
-        @trace_parse_action
-        def remove_duplicate_chars(tokens):
-            return ''.join(sorted(set(''.join(tokens))))
-
-        wds = OneOrMore(wd).set_parse_action(remove_duplicate_chars)
-        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
-
-    prints::
-
-        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s, l, t = paArgs[-3:]
-        if len(paArgs) > 3:
-            thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
-        sys.stderr.write(
-            ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
-        )
-        try:
-            ret = f(*paArgs)
-        except Exception as exc:
-            sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
-            raise
-        sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
-        return ret
-
-    return z
-
-
-# convenience constants for positional expressions
-empty = Empty().set_name("empty")
-line_start = LineStart().set_name("line_start")
-line_end = LineEnd().set_name("line_end")
-string_start = StringStart().set_name("string_start")
-string_end = StringEnd().set_name("string_end")
-
-_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
-    lambda s, l, t: t[0][1]
-)
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
-    lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
-)
-_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
-    lambda s, l, t: chr(int(t[0][1:], 8))
-)
-_singleChar = (
-    _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
-)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = (
-    Literal("[")
-    + Opt("^").set_results_name("negate")
-    + Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
-    + "]"
-)
-
-
-def srange(s: str) -> str:
-    r"""Helper to easily define string ranges for use in :class:`Word`
-    construction. Borrows syntax from regexp ``'[]'`` string range
-    definitions::
-
-        srange("[0-9]")   -> "0123456789"
-        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
-        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-
-    The input string must be enclosed in []'s, and the returned string
-    is the expanded character set joined into a single string. The
-    values enclosed in the []'s may be:
-
-    - a single character
-    - an escaped character with a leading backslash (such as ``\-``
-      or ``\]``)
-    - an escaped hex character with a leading ``'\x'``
-      (``\x21``, which is a ``'!'`` character) (``\0x##``
-      is also supported for backwards compatibility)
-    - an escaped octal character with a leading ``'\0'``
-      (``\041``, which is a ``'!'`` character)
-    - a range of any of the above, separated by a dash (``'a-z'``,
-      etc.)
-    - any combination of the above (``'aeiouy'``,
-      ``'a-zA-Z0-9_$'``, etc.)
- """ - _expanded = ( - lambda p: p - if not isinstance(p, ParseResults) - else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) - ) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) - except Exception: - return "" - - -def token_map(func, *args): - """Helper to define a parse action by mapping a function to all - elements of a :class:`ParseResults` list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, - which will convert the parsed data to an integer using base 16. - - Example (compare the last to example in :class:`ParserElement.transform_string`:: - - hex_ints = OneOrMore(Word(hexnums)).set_parse_action(token_map(int, 16)) - hex_ints.run_tests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).set_parse_action(token_map(str.upper)) - OneOrMore(upperword).run_tests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).set_parse_action(token_map(str.title)) - OneOrMore(wd).set_parse_action(' '.join).run_tests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - pa.__name__ = func_name - - return pa - - -def autoname_elements(): - """ - Utility to simplify mass-naming of parser elements, for - generating railroad diagram with named subdiagrams. 
- """ - for name, var in sys._getframe().f_back.f_locals.items(): - if isinstance(var, ParserElement) and not var.customName: - var.set_name(name) - - -dbl_quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' -).set_name("string enclosed in double quotes") - -sgl_quoted_string = Combine( - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("string enclosed in single quotes") - -quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("quotedString using single or double quotes") - -unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") - - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] - -# backward compatibility names -tokenMap = token_map -conditionAsParseAction = condition_as_parse_action -nullDebugAction = null_debug_action -sglQuotedString = sgl_quoted_string -dblQuotedString = dbl_quoted_string -quotedString = quoted_string -unicodeString = unicode_string -lineStart = line_start -lineEnd = line_end -stringStart = string_start -stringEnd = string_end -traceParseAction = trace_parse_action diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py deleted file mode 100644 index 4f7c41e44..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,593 +0,0 @@ -import railroad -import pyparsing -from pkg_resources import resource_filename -from typing import ( - List, - Optional, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - -with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp: - template = Template(fp.read()) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label)) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. 
- """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. - # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :params kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - io = StringIO() - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. 
This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names - bool to indicate whether results name annotations should be - included in the diagram - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - - -def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: str = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force 
extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children - themselves have children, then its complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - ) -> Optional[EditablePartial]: - - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, -) -> Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - """ - exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - ) - - # If the element isn't worth extracting, we always treat it as the first time we say it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, 
label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif len(exprs) > 1: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 deleted file mode 100644 index d2219fb01..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/diagram/template.jinja2 +++ /dev/null @@ -1,26 +0,0 @@ - - - - {% if not head %} - - {% else %} - {{ hear | safe }} - {% endif %} - - -{{ body | safe }} -{% for diagram in diagrams %} -
-    <div class="railroad-group">
-        <h1 class="railroad-heading">
-            {{ diagram.title }}
-        </h1>
-        <div>
-            {{ diagram.text }}
-        </div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-{% endfor %} - - diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py deleted file mode 100644 index e06513eb0..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/exceptions.py +++ /dev/null @@ -1,267 +0,0 @@ -# exceptions.py - -import re -import sys -from typing import Optional - -from .util import col, line, lineno, _collapse_string_to_ranges -from .unicode import pyparsing_unicode as ppu - - -class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): - pass - - -_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) -_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") - - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, - pstr: str, - loc: int = 0, - msg: Optional[str] = None, - elem=None, - ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parser_element = self.parserElement = elem - self.args = (pstr, loc, msg) - - @staticmethod - def explain_exception(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - """ - import inspect - from .core import ParserElement - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(" " * (exc.column - 1) + "^") - ret.append("{}: {}".format(type(exc).__name__, exc)) - - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get("self", None) - if isinstance(f_self, ParserElement): - if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): - continue - if id(f_self) in seen: - continue - seen.add(id(f_self)) - - self_type = type(f_self) - ret.append( - "{}.{} - {}".format( - self_type.__module__, self_type.__name__, f_self - ) - ) - - elif f_self is not None: - self_type = type(f_self) - ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) - - else: - code = frm.f_code - if code.co_name in ("wrapper", ""): - continue - - ret.append("{}".format(code.co_name)) - - depth -= 1 - if not depth: - break - - return "\n".join(ret) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - @property - def line(self) -> str: - """ - Return the line of text where the exception occurred. 
- """ - return line(self.loc, self.pstr) - - @property - def lineno(self) -> int: - """ - Return the 1-based line number of text where the exception occurred. - """ - return lineno(self.loc, self.pstr) - - @property - def col(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - @property - def column(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - def __str__(self) -> str: - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ", found end of text" - else: - # pull out next word at error location - found_match = _exception_word_extractor.match(self.pstr, self.loc) - if found_match is not None: - found = found_match.group(0) - else: - found = self.pstr[self.loc : self.loc + 1] - foundstr = (", found %r" % found).replace(r"\\", "\\") - else: - foundstr = "" - return "{}{} (at char {}), (line:{}, col:{})".format( - self.msg, foundstr, self.loc, self.lineno, self.column - ) - - def __repr__(self): - return str(self) - - def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: - """ - Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - markerString = marker_string if marker_string is not None else markerString - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( - (line_str[:line_column], markerString, line_str[line_column:]) - ) - return line_str.strip() - - def explain(self, depth=16) -> str: - """ - Method to translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Example:: - - expr = pp.Word(pp.nums) * 3 - try: - expr.parse_string("123 456 A789") - except pp.ParseException as pe: - print(pe.explain(depth=0)) - - prints:: - - 123 456 A789 - ^ - ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `set_name` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - Note: pyparsing's default truncation of exception tracebacks may also truncate the - stack of expressions that are displayed in the ``explain`` output. 
To get the full listing - of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` - """ - return self.explain_exception(self, depth) - - markInputline = mark_input_line - - -class ParseException(ParseBaseException): - """ - Exception thrown when a parse expression doesn't match the input string - - Example:: - - try: - Word(nums).set_name("integer").parse_string("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.column)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - -class ParseFatalException(ParseBaseException): - """ - User-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately - """ - - -class ParseSyntaxException(ParseFatalException): - """ - Just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - - -class RecursiveGrammarException(Exception): - """ - Exception thrown by :class:`ParserElement.validate` if the - grammar could be left-recursive; parser may need to enable - left recursion using :class:`ParserElement.enable_left_recursion` - """ - - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self) -> str: - return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py deleted file mode 100644 index 7d6119712..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/helpers.py +++ /dev/null @@ -1,1059 +0,0 @@ -# helpers.py -import html.entities -import re - -from . import __diag__ -from .core import * -from .util import _bslash, _flatten, _escape_regex_range_chars - - -# -# global helpers -# -def delimited_list( - expr: Union[str, ParserElement], - delim: Union[str, ParserElement] = ",", - combine: bool = False, - *, - allow_trailing_delim: bool = False, -) -> ParserElement: - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing ``combine=True`` in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. - - If ``allow_trailing_delim`` is set to True, then the list may end with - a delimiter. 
- - Example:: - - delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - if isinstance(expr, str_type): - expr = ParserElement._literalStringClass(expr) - - dlName = "{expr} [{delim} {expr}]...{end}".format( - expr=str(expr.streamline()), - delim=str(delim), - end=" [{}]".format(str(delim)) if allow_trailing_delim else "", - ) - - if not combine: - delim = Suppress(delim) - - delimited_list_expr = expr + ZeroOrMore(delim + expr) - - if allow_trailing_delim: - delimited_list_expr += Opt(delim) - - if combine: - return Combine(delimited_list_expr).set_name(dlName) - else: - return delimited_list_expr.set_name(dlName) - - -def counted_array( - expr: ParserElement, - int_expr: OptionalType[ParserElement] = None, - *, - intExpr: OptionalType[ParserElement] = None, -) -> ParserElement: - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``int_expr`` is specified, it should be a pyparsing expression - that produces an integer value. - - Example:: - - counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) - counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] - - # if other fields must be parsed after the count but before the - # list items, give the fields results names and they will - # be preserved in the returned ParseResults: - count_with_metadata = integer + Word(alphas)("type") - typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") - result = typed_array.parse_string("3 bool True True False") - print(result.dump()) - - # prints - # ['True', 'True', 'False'] - # - items: ['True', 'True', 'False'] - # - type: 'bool' - """ - intExpr = intExpr or int_expr - array_expr = Forward() - - def count_field_parse_action(s, l, t): - nonlocal array_expr - n = t[0] - array_expr <<= (expr * n) if n else Empty() - # clear list contents, but keep any named results - del t[:] - - if intExpr is None: - intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.set_name("arrayLen") - intExpr.add_parse_action(count_field_parse_action, call_during_try=True) - return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") - - -def match_previous_literal(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_literal(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, will also match the leading - ``"1:1"`` in ``"1:10"``. If this is not desired, use - :class:`match_previous_expr`. Do *not* use with packrat parsing - enabled. 
- """ - rep = Forward() - - def copy_token_to_repeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.as_list()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def match_previous_expr(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_expr(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - - def copy_token_to_repeater(s, l, t): - matchTokens = _flatten(t.as_list()) - - def must_match_these_tokens(s, l, t): - theseTokens = _flatten(t.as_list()) - if theseTokens != matchTokens: - raise ParseException("", 0, "") - - rep.set_parse_action(must_match_these_tokens, callDuringTry=True) - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def one_of( - strs: Union[IterableType[str], str], - caseless: bool = False, - use_regex: bool = True, - as_keyword: bool = False, - *, - useRegex: bool = True, - asKeyword: bool = False, -) -> ParserElement: - """Helper to quickly define a set of alternative :class:`Literal` s, - and makes sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. 
- - Parameters: - - - ``strs`` - a string of space-delimited literals, or a collection of - string literals - - ``caseless`` - treat all literals as caseless - (default= ``False``) - - ``use_regex`` - as an optimization, will - generate a :class:`Regex` object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - (default= ``True``) - - ``as_keyword`` - enforce :class:`Keyword`-style matching on the - generated expressions - (default= ``False``) - - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, - but will be removed in a future release - - Example:: - - comp_oper = one_of("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - asKeyword = asKeyword or as_keyword - useRegex = useRegex and use_regex - - if ( - isinstance(caseless, str_type) - and __diag__.warn_on_multiple_string_args_to_oneof - ): - warnings.warn( - "More than one string argument passed to one_of, pass" - " choices as a list or space-delimited string", - stacklevel=2, - ) - - if caseless: - isequal = lambda a, b: a.upper() == b.upper() - masks = lambda a, b: b.upper().startswith(a.upper()) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = lambda a, b: a == b - masks = lambda a, b: b.startswith(a) - parseElementClass = Keyword if asKeyword else Literal - - symbols = [] - if isinstance(strs, str_type): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - raise TypeError("Invalid argument to one_of, expected string or iterable") - if not symbols: - return NoMatch() - - # reorder given symbols to take care to avoid masking longer choices with shorter ones - # (but only if the given symbols are not just single characters) - if any(len(sym) > 1 for sym in symbols): - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1 :]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if useRegex: - re_flags: int = re.IGNORECASE if caseless else 0 - - try: - if all(len(sym) == 1 for sym in symbols): - # symbols are just single characters, create range regex pattern - patt = "[{}]".format( - "".join(_escape_regex_range_chars(sym) for sym in symbols) - ) - else: - patt = "|".join(re.escape(sym) for sym in symbols) - - # wrap with \b word break markers if defining as keywords - if asKeyword: - patt = r"\b(?:{})\b".format(patt) - - ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) - - if caseless: - # add parse action to return symbols as specified, not in random - # casing as found in input string - symbol_map = {sym.lower(): sym for sym in symbols} - ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) - - return ret - - except sre_constants.error: - warnings.warn( - "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 - ) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( - " | ".join(symbols) - ) - - -def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: - """Helper to easily and clearly define a dictionary by specifying 
- the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - print(OneOrMore(attr_expr).parse_string(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) - - # similar to Dict, but simpler call format - result = dict_of(attr_label, attr_value).parse_string(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.as_dict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - - -def original_text_for( - expr: ParserElement, as_string: bool = True, *, asString: bool = True -) -> ParserElement: - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns astring containing the original parsed text. - - If the optional ``as_string`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`original_text_for` contains expressions with defined - results names, you must set ``as_string`` to ``False`` if you - want to preserve those results name values. - - The ``asString`` pre-PEP8 argument is retained for compatibility, - but will be removed in a future release. - - Example:: - - src = "this is test <b> bold <i>text</i> </b> normal text " - for tag in ("b", "i"): - opener, closer = make_html_tags(tag) - patt = original_text_for(opener + SkipTo(closer) + closer) - print(patt.search_string(src)[0]) - - prints:: - - ['<b> bold <i>text</i> </b>'] - ['<i>text</i>'] - """ - asString = asString and as_string - - locMarker = Empty().set_parse_action(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start : t._original_end] - else: - - def extractText(s, l, t): - t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] - - matchExpr.set_parse_action(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) - return matchExpr - - -def ungroup(expr: ParserElement) -> ParserElement: - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty.
- """ - return TokenConverter(expr).add_parse_action(lambda t: t[0]) - - -def locatedExpr(expr: ParserElement) -> ParserElement: - """ - (DEPRECATED - future code should use the Located class) - Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().set_parse_action(lambda ss, ll, tt: ll) - return Group( - locator("locn_start") - + expr("value") - + locator.copy().leaveWhitespace()("locn_end") - ) - - -def nested_expr( - opener: Union[str, ParserElement] = "(", - closer: Union[str, ParserElement] = ")", - content: OptionalType[ParserElement] = None, - ignore_expr: ParserElement = quoted_string(), - *, - ignoreExpr: ParserElement = quoted_string(), -) -> ParserElement: - """Helper method for defining nested lists enclosed in opening and - closing delimiters (``"("`` and ``")"`` are the default). - - Parameters: - - ``opener`` - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - ``closer`` - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - ``content`` - expression for items within the nested lists - (default= ``None``) - - ``ignore_expr`` - expression for ignoring opening and closing delimiters - (default= :class:`quoted_string`) - - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility - but will be removed in a future release - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignore_expr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quoted_string or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quoted_string`, but if no expressions are to be ignored, then - pass ``None`` for this argument. 
- - Example:: - - data_type = one_of("void int short long char float double") - decl_data_type = Combine(data_type + Opt(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Opt(delimited_list(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(c_style_comment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.search_string(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if ignoreExpr != ignore_expr: - ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, str_type) and isinstance(closer, str_type): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS, - exact=1, - ) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = empty.copy() + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS - ).set_parse_action(lambda t: t[0].strip()) - else: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = Combine( - OneOrMore( - ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - raise ValueError( - "opening and closing arguments must be strings if no content expression is given" - ) - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( - Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) - ) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.set_name("nested %s%s expression" % (opener, closer)) - return ret - - -def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, str_type): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - else: - tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( - printables, exclude_chars=">" - ) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict( - ZeroOrMore( - Group( - tagAttrName.set_parse_action(lambda t: t[0].lower()) - + Opt(Suppress("=") + tagAttrValue) - ) - ) - ) - + Opt("/", 
default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - closeTag = Combine(Literal("", adjacent=False) - - openTag.set_name("<%s>" % resname) - # add start results name in parse action now that ungrouped names are not reported at two levels - openTag.add_parse_action( - lambda t: t.__setitem__( - "start" + "".join(resname.replace(":", " ").title().split()), t.copy() - ) - ) - closeTag = closeTag( - "end" + "".join(resname.replace(":", " ").title().split()) - ).set_name("" % resname) - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - - -def make_html_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. - - Example:: - - text = 'More info at the pyparsing wiki page' - # make_html_tags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = make_html_tags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.search_string(text): - # attributes in the tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tag_str, False) - - -def make_xml_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`make_html_tags` - """ - return _makeTags(tag_str, True) - - -any_open_tag, any_close_tag = make_html_tags( - Word(alphas, alphanums + "_:").set_name("any tag") -) - -_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} -common_html_entity = Regex("&(?P" + "|".join(_htmlEntityMap) + ");").set_name( - "common HTML entity" -) - - -def replace_html_entity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - - -class OpAssoc(Enum): - LEFT = 1 - RIGHT = 2 - - -InfixNotationOperatorArgType = Union[ - ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] -] -InfixNotationOperatorSpec = Union[ - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - OptionalType[ParseAction], - ], - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - ], -] - - -def infix_notation( - base_expr: ParserElement, - op_list: List[InfixNotationOperatorSpec], - lpar: Union[str, ParserElement] = Suppress("("), - rpar: Union[str, ParserElement] = Suppress(")"), -) -> ParserElement: - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infix_notation. See - :class:`ParserElement.enable_packrat` for a mechanism to potentially - improve your parser performance. 
- - Parameters: - - ``base_expr`` - expression representing the most basic operand to - be used in the expression - - ``op_list`` - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(op_expr, - num_operands, right_left_assoc, (optional)parse_action)``, where: - - - ``op_expr`` is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if ``num_operands`` - is 3, ``op_expr`` is a tuple of two expressions, for the two - operators separating the 3 terms - - ``num_operands`` is the number of terms for this operator (must be 1, - 2, or 3) - - ``right_left_assoc`` is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. - - ``parse_action`` is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``set_parse_action(*fn)`` - (:class:`ParserElement.set_parse_action`) - - ``lpar`` - expression for matching left-parentheses - (default= ``Suppress('(')``) - - ``rpar`` - expression for matching right-parentheses - (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infix_notation(integer | varname, - [ - ('-', 1, OpAssoc.RIGHT), - (one_of('* /'), 2, OpAssoc.LEFT), - (one_of('+ -'), 2, OpAssoc.LEFT), - ]) - - arith_expr.run_tests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', full_dump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.try_parse(instring, loc) - return loc, [] - - _FB.__name__ = "FollowedBy>" - - ret = Forward() - lpar = Suppress(lpar) - rpar = Suppress(rpar) - lastExpr = base_expr | (lpar + ret + rpar) - for i, operDef in enumerate(op_list): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] - if isinstance(opExpr, str_type): - opExpr = ParserElement._literalStringClass(opExpr) - if arity == 3: - if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions" - ) - opExpr1, opExpr2 = opExpr - term_name = "{}{} term".format(opExpr1, opExpr2) - else: - term_name = "{} term".format(opExpr) - - if not 1 <= arity <= 3: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - - if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): - raise ValueError("operator must indicate right or left associativity") - - thisExpr = Forward().set_name(term_name) - if rightLeftAssoc is OpAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( - lastExpr + (opExpr + lastExpr)[1, ...] 
- ) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr - ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) - elif rightLeftAssoc is OpAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Opt): - opExpr = Opt(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( - lastExpr + (opExpr + thisExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group( - lastExpr + thisExpr[1, ...] - ) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr - ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.set_parse_action(*pa) - else: - matchExpr.set_parse_action(pa) - thisExpr <<= (matchExpr | lastExpr).setName(term_name) - lastExpr = thisExpr - ret <<= lastExpr - return ret - - -def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): - """ - (DEPRECATED - use IndentedBlock class instead) - Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - ``blockStatementExpr`` - expression defining syntax of statement that - is repeated within the indented block - - ``indentStack`` - list created by caller to manage indentation stack - (multiple ``statementWithIndentedBlock`` expressions within a single - grammar should share a common ``indentStack``) - - ``indent`` - boolean indicating whether block must be indented beyond - the current level; set to ``False`` for block of left-most statements - (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - (Note that indentedBlock uses internal parse actions which make it - incompatible with packrat parsing.) 
- - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stacks.append(indentStack[:]) - - def reset_stack(): - indentStack[:] = backup_stacks[-1] - - def checkPeerIndent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if not (indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) - INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") - PEER = Empty().set_parse_action(checkPeerIndent).set_name("") - UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") - if indent: - smExpr = Group( - Opt(NL) - + INDENT - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + UNDENT - ) - else: - smExpr = Group( - Opt(NL) - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + Opt(UNDENT) - ) - - # add a parse action to remove backup_stack from list of backups - smExpr.add_parse_action( - lambda: backup_stacks.pop(-1) and None if backup_stacks else None - ) - smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.set_name("indented block") - - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name( - "C style comment" -) -"Comment of the form ``/* ... */``" - -html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment") -"Comment of the form ``<!-- ... -->``" - -rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") -dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") -"Comment of the form ``// ...
(to end of line)``" - -cpp_style_comment = Combine( - Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment -).set_name("C++ style comment") -"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" - -java_style_comment = cpp_style_comment -"Same as :class:`cpp_style_comment`" - -python_style_comment = Regex(r"#.*").set_name("Python style comment") -"Comment of the form ``# ... (to end of line)``" - - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] - - -# pre-PEP8 compatible names -delimitedList = delimited_list -countedArray = counted_array -matchPreviousLiteral = match_previous_literal -matchPreviousExpr = match_previous_expr -oneOf = one_of -dictOf = dict_of -originalTextFor = original_text_for -nestedExpr = nested_expr -makeHTMLTags = make_html_tags -makeXMLTags = make_xml_tags -anyOpenTag, anyCloseTag = any_open_tag, any_close_tag -commonHTMLEntity = common_html_entity -replaceHTMLEntity = replace_html_entity -opAssoc = OpAssoc -infixNotation = infix_notation -cStyleComment = c_style_comment -htmlComment = html_comment -restOfLine = rest_of_line -dblSlashComment = dbl_slash_comment -cppStyleComment = cpp_style_comment -javaStyleComment = java_style_comment -pythonStyleComment = python_style_comment diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py deleted file mode 100644 index 842d16b3c..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/results.py +++ /dev/null @@ -1,758 +0,0 @@ -# results.py -from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator -import pprint -from weakref import ref as wkref -from typing import Tuple, Any - -str_type: Tuple[type, ...] = (str, bytes) -_generator_type = type((_ for _ in ())) - - -class _ParseResultsWithOffset: - __slots__ = ["tup"] - - def __init__(self, p1, p2): - self.tup = (p1, p2) - - def __getitem__(self, i): - return self.tup[i] - - def __getstate__(self): - return self.tup - - def __setstate__(self, *args): - self.tup = args[0] - - -class ParseResults: - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) - - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`) - - Example:: - - integer = Word(nums) - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - # equivalent form: - # date_str = (integer("year") + '/' - # + integer("month") + '/' - # + integer("day")) - - # parse_string returns a ParseResults object - result = date_str.parse_string("1999/12/31") - - def test(s, fn=repr): - print("{} -> {}".format(s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: 31 - - month: 12 - - year: 1999 - """ - - _null_values: Tuple[Any, ...]
= (None, [], "", ()) - - __slots__ = [ - "_name", - "_parent", - "_all_names", - "_modal", - "_toklist", - "_tokdict", - "__weakref__", - ] - - class List(list): - """ - Simple wrapper class to distinguish parsed list results that should be preserved - as actual Python lists, instead of being converted to :class:`ParseResults`: - - LBRACK, RBRACK = map(pp.Suppress, "[]") - element = pp.Forward() - item = ppc.integer - element_list = LBRACK + pp.delimited_list(element) + RBRACK - - # add parse actions to convert from ParseResults to actual Python collection types - def as_python_list(t): - return pp.ParseResults.List(t.as_list()) - element_list.add_parse_action(as_python_list) - - element <<= item | element_list - - element.run_tests(''' - 100 - [2,3,4] - [[2, 1],3,4] - [(2, 1),3,4] - (2,3,4) - ''', post_parse=lambda s, r: (r[0], type(r[0]))) - - prints: - - 100 - (100, ) - - [2,3,4] - ([2, 3, 4], ) - - [[2, 1],3,4] - ([[2, 1], 3, 4], ) - - (Used internally by :class:`Group` when `aslist=True`.) - """ - - def __new__(cls, contained=None): - if contained is None: - contained = [] - - if not isinstance(contained, list): - raise TypeError( - "{} may only be constructed with a list," - " not {}".format(cls.__name__, type(contained).__name__) - ) - - return list.__new__(cls) - - def __new__(cls, toklist=None, name=None, **kwargs): - if isinstance(toklist, ParseResults): - return toklist - self = object.__new__(cls) - self._name = None - self._parent = None - self._all_names = set() - - if toklist is None: - self._toklist = [] - elif isinstance(toklist, (list, _generator_type)): - self._toklist = ( - [toklist[:]] - if isinstance(toklist, ParseResults.List) - else list(toklist) - ) - else: - self._toklist = [toklist] - self._tokdict = dict() - return self - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance - ): - self._modal = modal - if name is not None and name != "": - if isinstance(name, int): - name = str(name) - if not modal: - self._all_names = {name} - self._name = name - if toklist not in self._null_values: - if isinstance(toklist, (str_type, type)): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset( - ParseResults(toklist._toklist), 0 - ) - else: - self[name] = _ParseResultsWithOffset( - ParseResults(toklist[0]), 0 - ) - self[name]._name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - if toklist is not self: - self[name] = toklist - else: - self._name = name - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self._toklist[i] - else: - if i not in self._all_names: - return self._tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self._tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self._tokdict[k] = self._tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self._toklist[k] = v - sub = v - else: - self._tokdict[k] = self._tokdict.get(k, list()) + [ - _ParseResultsWithOffset(v, 0) - ] - sub = v - if isinstance(sub, ParseResults): - sub._parent = wkref(self) - - def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self._toklist) - del self._toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - 
removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position - (position > j) - ) - else: - del self._tokdict[i] - - def __contains__(self, k) -> bool: - return k in self._tokdict - - def __len__(self) -> int: - return len(self._toklist) - - def __bool__(self) -> bool: - return not not (self._toklist or self._tokdict) - - def __iter__(self) -> Iterator: - return iter(self._toklist) - - def __reversed__(self) -> Iterator: - return iter(self._toklist[::-1]) - - def keys(self): - return iter(self._tokdict) - - def values(self): - return (self[k] for k in self.keys()) - - def items(self): - return ((k, self[k]) for k in self.keys()) - - def haskeys(self) -> bool: - """ - Since ``keys()`` returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self._tokdict) - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - def remove_first(tokens): - tokens.pop(0) - numlist.add_parse_action(remove_first) - print(numlist.parse_string("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + OneOrMore(Word(nums)) - print(patt.parse_string("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.add_parse_action(remove_LABEL) - print(patt.parse_string("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: AAB - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == "default": - args = (args[0], v) - else: - raise TypeError( - "pop() got an unexpected keyword argument {!r}".format(k) - ) - if isinstance(args[0], int) or len(args) == 1 or args[0] in self: - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, default_value=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``default_value`` or ``None`` if no - ``default_value`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return default_value - - def insert(self, index, ins_string): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. 
- - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - numlist.add_parse_action(insert_locn) - print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] - """ - self._toklist.insert(index, ins_string) - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position + (position > index) - ) - - def append(self, item): - """ - Add single element to end of ``ParseResults`` list of elements. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - numlist.add_parse_action(append_sum) - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] - """ - self._toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ``ParseResults`` list of elements. - - Example:: - - patt = OneOrMore(Word(alphas)) - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - patt.add_parse_action(make_palindrome) - print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self._toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self._toklist[:] - self._tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - if name.startswith("__"): - raise AttributeError(name) - return "" - - def __add__(self, other) -> "ParseResults": - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other) -> "ParseResults": - if other._tokdict: - offset = len(self._toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other._tokdict.items() - otherdictitems = [ - (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems - for v in vlist - ] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0]._parent = wkref(self) - - self._toklist += other._toklist - self._all_names |= other._all_names - return self - - def __radd__(self, other) -> "ParseResults": - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self) -> str: - return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict()) - - def __str__(self) -> str: - return ( - "[" - + ", ".join( - str(i) if isinstance(i, ParseResults) else repr(i) - for i in self._toklist - ) - + "]" - ) - - def _asStringList(self, sep=""): - out = [] - for item in self._toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(str(item)) - return out - - def as_list(self) -> list: - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. 
- - Example:: - - patt = OneOrMore(Word(alphas)) - result = patt.parse_string("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] - - # Use as_list() to create an actual list - result_list = result.as_list() - print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [ - res.as_list() if isinstance(res, ParseResults) else res - for res in self._toklist - ] - - def as_dict(self) -> dict: - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('12/31/1999') - print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.as_dict() - print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - - def to_item(obj): - if isinstance(obj, ParseResults): - return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] - else: - return obj - - return dict((k, to_item(v)) for k, v in self.items()) - - def copy(self) -> "ParseResults": - """ - Returns a new copy of a :class:`ParseResults` object. - """ - ret = ParseResults(self._toklist) - ret._tokdict = self._tokdict.copy() - ret._parent = self._parent - ret._all_names |= self._all_names - ret._name = self._name - return ret - - def get_name(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = OneOrMore(user_data) - - result = user_info.parse_string("22 111-22-3333 #221B") - for item in result: - print(item.get_name(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self._name: - return self._name - elif self._parent: - par = self._parent() - - def find_in_parent(sub): - return next( - ( - k - for k, vlist in par._tokdict.items() - for v, loc in vlist - if sub is v - ), - None, - ) - - return find_in_parent(self) if par else None - elif ( - len(self) == 1 - and len(self._tokdict) == 1 - and next(iter(self._tokdict.values()))[0][1] in (0, -1) - ): - return next(iter(self._tokdict.keys())) - else: - return None - - def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data.
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('12/31/1999') - print(result.dump()) - - prints:: - - ['12', '/', '31', '/', '1999'] - - day: 1999 - - month: 31 - - year: 12 - """ - out = [] - NL = "\n" - out.append(indent + str(self.as_list()) if include_list else "") - - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append("{}{}- {}: ".format(indent, (" " * _depth), k)) - if isinstance(v, ParseResults): - if v: - out.append( - v.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ) - ) - else: - out.append(str(v)) - else: - out.append(repr(v)) - if any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append( - "\n{}{}[{}]:\n{}{}{}".format( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - vv.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ), - ) - ) - else: - out.append( - "\n%s%s[%d]:\n%s%s%s" - % ( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - str(vv), - ) - ) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint <https://docs.python.org/3/library/pprint.html>`_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimited_list(term))) - result = func.parse_string("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.as_list(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return ( - self._toklist, - ( - self._tokdict.copy(), - self._parent is not None and self._parent() or None, - self._all_names, - self._name, - ), - ) - - def __setstate__(self, state): - self._toklist, (self._tokdict, par, inAccumNames, self._name) = state - self._all_names = set(inAccumNames) - if par is not None: - self._parent = wkref(par) - else: - self._parent = None - - def __getnewargs__(self): - return self._toklist, self._name - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None) -> "ParseResults": - """ - Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the - name-value relations as results names. If an optional ``name`` argument is - given, a nested ``ParseResults`` will be returned.
- """ - - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - else: - return not isinstance(obj, str_type) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - - asList = as_list - asDict = as_dict - getName = get_name - - -MutableMapping.register(ParseResults) -MutableSequence.register(ParseResults) diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py deleted file mode 100644 index 991972f3f..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/testing.py +++ /dev/null @@ -1,331 +0,0 @@ -# testing.py - -from contextlib import contextmanager -from typing import Optional - -from .core import ( - ParserElement, - ParseException, - Keyword, - __diag__, - __compat__, -) - - -class pyparsing_test: - """ - namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - bounded recursion parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example:: - - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] + ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - - self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace - - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - if ParserElement._packratEnabled: - self._save_context[ - "packrat_cache_size" - ] = ParserElement.packrat_cache.size - else: - self._save_context["packrat_cache_size"] = None - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context[ - "recursion_enabled" - ] = ParserElement._left_recursion_enabled - - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.set_default_whitespace_chars( - self._save_context["default_whitespace"] - ) - - ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] - - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - - for name, value in self._save_context["__diag__"].items(): - (__diag__.enable if value else __diag__.disable)(name) - - ParserElement._packratEnabled = False - if 
self._save_context["packrat_enabled"]: - ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) - else: - ParserElement._parse = self._save_context["packrat_parse"] - ParserElement._left_recursion_enabled = self._save_context[ - "recursion_enabled" - ] - - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - return self - - def copy(self): - ret = type(self)() - ret._save_context.update(self._save_context) - return ret - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, - and compare any defined results names with an optional ``expected_dict``. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.as_list(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.as_dict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. - """ - result = expr.parse_string(test_string, parse_all=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. - """ - result = expr.parse_string(test_string, parseAll=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of - list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped - with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. - Finally, asserts that the overall ``runTests()`` success value is ``True``. 
- - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (*rpt, expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - @staticmethod - def with_line_numbers( - s: str, - start_line: Optional[int] = None, - end_line: Optional[int] = None, - expand_tabs: bool = True, - eol_mark: str = "|", - mark_spaces: Optional[str] = None, - mark_control: Optional[str] = None, - ) -> str: - """ - Helpful method for debugging a parser - prints a string with line and column numbers. - (Line and column numbers are 1-based.) 
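``assertRunTestResults`` consumes the ``(success, report)`` tuple returned by ``run_tests``, zipping each report entry against an expected tuple that may mix a token list, a results dict, an exception type, and a failure message. A short sketch, with an illustrative grammar::

    import unittest
    from pyparsing import Word, nums, pyparsing_test as ppt

    class RunTestsExample(ppt.TestParseResultsAsserts, unittest.TestCase):
        def test_integers(self):
            integer = Word(nums).set_name("integer")
            # run_tests parses each line separately and returns (bool, [(str, result), ...])
            report = integer.run_tests("100\n42", print_results=False)
            # one expected tuple per test line; here only the token lists are checked
            self.assertRunTestResults(report, [(["100"],), (["42"],)])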
- - :param s: tuple(bool, str - string to be printed with line and column numbers - :param start_line: int - (optional) starting line number in s to print (default=1) - :param end_line: int - (optional) ending line number in s to print (default=len(s)) - :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default - :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") - :param mark_spaces: str - (optional) special character to display in place of spaces - :param mark_control: str - (optional) convert non-printing control characters to a placeholding - character; valid values: - - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" - - any single character string - replace control characters with given string - - None (default) - string is displayed as-is - - :return: str - input string with leading line numbers and column number headers - """ - if expand_tabs: - s = s.expandtabs() - if mark_control is not None: - if mark_control == "unicode": - tbl = str.maketrans( - {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} - | {127: 0x2421} - ) - eol_mark = "" - else: - tbl = str.maketrans( - {c: mark_control for c in list(range(0, 32)) + [127]} - ) - s = s.translate(tbl) - if mark_spaces is not None and mark_spaces != " ": - if mark_spaces == "unicode": - tbl = str.maketrans({9: 0x2409, 32: 0x2423}) - s = s.translate(tbl) - else: - s = s.replace(" ", mark_spaces) - if start_line is None: - start_line = 1 - if end_line is None: - end_line = len(s) - end_line = min(end_line, len(s)) - start_line = min(max(1, start_line), end_line) - - if mark_control != "unicode": - s_lines = s.splitlines()[start_line - 1 : end_line] - else: - s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] - if not s_lines: - return "" - - lineno_width = len(str(end_line)) - max_line_len = max(len(line) for line in s_lines) - lead = " " * (lineno_width + 1) - if max_line_len >= 99: - header0 = ( - lead - + "".join( - "{}{}".format(" " * 99, (i + 1) % 100) - for i in range(max(max_line_len // 100, 1)) - ) - + "\n" - ) - else: - header0 = "" - header1 = ( - header0 - + lead - + "".join( - " {}".format((i + 1) % 10) - for i in range(-(-max_line_len // 10)) - ) - + "\n" - ) - header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" - return ( - header1 - + header2 - + "\n".join( - "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) - for i, line in enumerate(s_lines, start=start_line) - ) - + "\n" - ) diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py deleted file mode 100644 index caa3306db..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/unicode.py +++ /dev/null @@ -1,332 +0,0 @@ -# unicode.py - -import sys -from itertools import filterfalse -from typing import List, Tuple, Union - - -class _lazyclassproperty: - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, "_intern") or any( - cls._intern is getattr(superclass, "_intern", []) - for superclass in cls.__mro__[1:] - ): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] - - -class unicode_set: - """ - A set of Unicode characters, for 
language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``. Ranges can be specified using - 2-tuples or a 1-tuple, such as:: - - _ranges = [ - (0x0020, 0x007e), - (0x00a0, 0x00ff), - (0x0100,), - ] - - Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - - _ranges: UnicodeRangeList = [] - - @_lazyclassproperty - def _chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in getattr(cc, "_ranges", ()): - ret.extend(range(rr[0], rr[-1] + 1)) - return [chr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return "".join(filter(str.isalpha, cls._chars_for_ranges)) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return "".join(filter(str.isdigit, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - @_lazyclassproperty - def identchars(cls): - "all characters in this range that are valid identifier characters, plus underscore '_'" - return "".join( - sorted( - set( - "".join(filter(str.isidentifier, cls._chars_for_ranges)) - + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" - + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" - + "_" - ) - ) - ) - - @_lazyclassproperty - def identbodychars(cls): - """ - all characters in this range that are valid identifier body characters, - plus the digits 0-9 - """ - return "".join( - sorted( - set( - cls.identchars - + "0123456789" - + "".join( - c for c in cls._chars_for_ranges if ("_" + c).isidentifier() - ) - ) - ) - ) - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
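The ``unicode_set`` machinery removed above drives the ``pyparsing_unicode`` namespace that follows. A small sketch of both ways to obtain a language-specific character set (the Runic range is purely illustrative)::

    from pyparsing import Word, pyparsing_unicode as ppu
    from pyparsing.unicode import unicode_set

    # use a predefined language set
    greek_word = Word(ppu.Greek.alphas)
    print(greek_word.parse_string("αβγ"))

    # or define a custom set from code-point ranges
    class Runic(unicode_set):
        # 1-tuples mean single code points; 2-tuples are inclusive ranges
        _ranges = [(0x16A0, 0x16F8)]

    runic_word = Word(Runic.alphas)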
- """ - - _ranges: UnicodeRangeList = [(32, sys.maxunicode)] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0020, 0x007E), - (0x00A0, 0x00FF), - ] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0100, 0x017F), - ] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0180, 0x024F), - ] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges: UnicodeRangeList = [ - (0x0342, 0x0345), - (0x0370, 0x0377), - (0x037A, 0x037F), - (0x0384, 0x038A), - (0x038C,), - (0x038E, 0x03A1), - (0x03A3, 0x03E1), - (0x03F0, 0x03FF), - (0x1D26, 0x1D2A), - (0x1D5E,), - (0x1D60,), - (0x1D66, 0x1D6A), - (0x1F00, 0x1F15), - (0x1F18, 0x1F1D), - (0x1F20, 0x1F45), - (0x1F48, 0x1F4D), - (0x1F50, 0x1F57), - (0x1F59,), - (0x1F5B,), - (0x1F5D,), - (0x1F5F, 0x1F7D), - (0x1F80, 0x1FB4), - (0x1FB6, 0x1FC4), - (0x1FC6, 0x1FD3), - (0x1FD6, 0x1FDB), - (0x1FDD, 0x1FEF), - (0x1FF2, 0x1FF4), - (0x1FF6, 0x1FFE), - (0x2129,), - (0x2719, 0x271A), - (0xAB65,), - (0x10140, 0x1018D), - (0x101A0,), - (0x1D200, 0x1D245), - (0x1F7A1, 0x1F7A7), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0400, 0x052F), - (0x1C80, 0x1C88), - (0x1D2B,), - (0x1D78,), - (0x2DE0, 0x2DFF), - (0xA640, 0xA672), - (0xA674, 0xA69F), - (0xFE2E, 0xFE2F), - ] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x2E80, 0x2E99), - (0x2E9B, 0x2EF3), - (0x31C0, 0x31E3), - (0x3400, 0x4DB5), - (0x4E00, 0x9FEF), - (0xA700, 0xA707), - (0xF900, 0xFA6D), - (0xFA70, 0xFAD9), - (0x16FE2, 0x16FE3), - (0x1F210, 0x1F212), - (0x1F214, 0x1F23B), - (0x1F240, 0x1F248), - (0x20000, 0x2A6D6), - (0x2A700, 0x2B734), - (0x2B740, 0x2B81D), - (0x2B820, 0x2CEA1), - (0x2CEB0, 0x2EBE0), - (0x2F800, 0x2FA1D), - ] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges: UnicodeRangeList = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x4E00, 0x9FBF), - (0x3000, 0x303F), - ] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3041, 0x3096), - (0x3099, 0x30A0), - (0x30FC,), - (0xFF70,), - (0x1B001,), - (0x1B150, 0x1B152), - (0x1F200,), - ] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3099, 0x309C), - (0x30A0, 0x30FF), - (0x31F0, 0x31FF), - (0x32D0, 0x32FE), - (0xFF65, 0xFF9F), - (0x1B000,), - (0x1B164, 0x1B167), - (0x1F201, 0x1F202), - (0x1F213,), - ] - - class Hangul(unicode_set): - "Unicode set for Hangul (Korean) Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x1100, 0x11FF), - (0x302E, 0x302F), - (0x3131, 0x318E), - (0x3200, 0x321C), - (0x3260, 0x327B), - (0x327E,), - (0xA960, 0xA97C), - (0xAC00, 0xD7A3), - (0xD7B0, 0xD7C6), - (0xD7CB, 0xD7FB), - (0xFFA0, 0xFFBE), - (0xFFC2, 0xFFC7), - (0xFFCA, 0xFFCF), - (0xFFD2, 0xFFD7), - (0xFFDA, 0xFFDC), - ] - - Korean = Hangul - - class CJK(Chinese, Japanese, Hangul): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - pass - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges: 
UnicodeRangeList = [(0x0E01, 0x0E3A), (0x0E3F, 0x0E5B)] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0600, 0x061B), - (0x061E, 0x06FF), - (0x0700, 0x077F), - ] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0591, 0x05C7), - (0x05D0, 0x05EA), - (0x05EF, 0x05F4), - (0xFB1D, 0xFB36), - (0xFB38, 0xFB3C), - (0xFB3E,), - (0xFB40, 0xFB41), - (0xFB43, 0xFB44), - (0xFB46, 0xFB4F), - ] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges: UnicodeRangeList = [(0x0900, 0x097F), (0xA8E0, 0xA8FF)] - - -pyparsing_unicode.Japanese._ranges = ( - pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges -) - -# define ranges in language character sets -pyparsing_unicode.العربية = pyparsing_unicode.Arabic -pyparsing_unicode.中文 = pyparsing_unicode.Chinese -pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic -pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek -pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew -pyparsing_unicode.日本語 = pyparsing_unicode.Japanese -pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji -pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana -pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana -pyparsing_unicode.한국어 = pyparsing_unicode.Korean -pyparsing_unicode.ไทย = pyparsing_unicode.Thai -pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py b/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py deleted file mode 100644 index 1309ad6ef..000000000 --- a/.eggs/pyparsing-3.0.6-py3.8.egg/pyparsing/util.py +++ /dev/null @@ -1,234 +0,0 @@ -# util.py -import warnings -import types -import collections -import itertools -from functools import lru_cache -from typing import List, Union, Iterable - -_bslash = chr(92) - - -class __config_flags: - """Internal class for defining compatibility and debugging flags""" - - _all_names: List[str] = [] - _fixed_names: List[str] = [] - _type_desc = "configuration" - - @classmethod - def _set(cls, dname, value): - if dname in cls._fixed_names: - warnings.warn( - "{}.{} {} is {} and cannot be overridden".format( - cls.__name__, - dname, - cls._type_desc, - str(getattr(cls, dname)).upper(), - ) - ) - return - if dname in cls._all_names: - setattr(cls, dname, value) - else: - raise ValueError("no such {} {!r}".format(cls._type_desc, dname)) - - enable = classmethod(lambda cls, name: cls._set(name, True)) - disable = classmethod(lambda cls, name: cls._set(name, False)) - - -@lru_cache(maxsize=128) -def col(loc: int, strg: str) -> int: - """ - Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See - :class:`ParserElement.parseString` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) - - -@lru_cache(maxsize=128) -def lineno(loc: int, strg: str) -> int: - """Returns current line number within a string, counting newlines as line separators. 
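``col``, ``lineno`` and ``line`` are the 1-based location helpers that back pyparsing's error reporting; the same values surface as attributes of ``ParseException``. A short sketch::

    from pyparsing import Word, alphas, ParseException

    greeting = Word(alphas) + "," + Word(alphas) + "!"
    try:
        greeting.parse_string("Hello Dolly !")   # missing comma
    except ParseException as err:
        # lineno/column/line mirror the module-level location helpers
        print(err.lineno, err.column, err.line)
        print(err.explain())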
- The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`ParserElement.parseString` - for more information on parsing strings containing ```` s, and - suggested methods to maintain a consistent view of the parsed string, the - parse location, and line and column positions within the parsed string. - """ - return strg.count("\n", 0, loc) + 1 - - -@lru_cache(maxsize=128) -def line(loc: int, strg: str) -> str: - """ - Returns the line of text containing loc within a string, counting newlines as line separators. - """ - last_cr = strg.rfind("\n", 0, loc) - next_cr = strg.find("\n", loc) - return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :] - - -class _UnboundedCache: - def __init__(self): - cache = {} - cache_get = cache.get - self.not_in_cache = not_in_cache = object() - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - - def clear(_): - cache.clear() - - self.size = None - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class _FifoCache: - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - cache = collections.OrderedDict() - cache_get = cache.get - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - while len(cache) > size: - cache.popitem(last=False) - - def clear(_): - cache.clear() - - self.size = size - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class LRUMemo: - """ - A memoizing mapping that retains `capacity` deleted items - - The memo tracks retained items by their access order; once `capacity` items - are retained, the least recently used item is discarded. 
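``_UnboundedCache`` and ``_FifoCache`` are the storage behind packrat parsing, while ``LRUMemo`` and ``UnboundedMemo`` back the bounded-recursion support; they are selected through the public switches sketched below (the cache size shown is illustrative)::

    from pyparsing import ParserElement

    # a bounded FIFO cache of the given size; pass None for an unbounded cache
    ParserElement.enable_packrat(128)

    # left-recursion support memoizes with LRUMemo/UnboundedMemo instead;
    # it is mutually exclusive with packrat parsing:
    # ParserElement.enable_left_recursion(None)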
- """ - - def __init__(self, capacity): - self._capacity = capacity - self._active = {} - self._memory = collections.OrderedDict() - - def __getitem__(self, key): - try: - return self._active[key] - except KeyError: - self._memory.move_to_end(key) - return self._memory[key] - - def __setitem__(self, key, value): - self._memory.pop(key, None) - self._active[key] = value - - def __delitem__(self, key): - try: - value = self._active.pop(key) - except KeyError: - pass - else: - while len(self._memory) >= self._capacity: - self._memory.popitem(last=False) - self._memory[key] = value - - def clear(self): - self._active.clear() - self._memory.clear() - - -class UnboundedMemo(dict): - """ - A memoizing mapping that retains all deleted items - """ - - def __delitem__(self, key): - pass - - -def _escape_regex_range_chars(s: str) -> str: - # escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return str(s) - - -def _collapse_string_to_ranges( - s: Union[str, Iterable[str]], re_escape: bool = True -) -> str: - def is_consecutive(c): - c_int = ord(c) - is_consecutive.prev, prev = c_int, is_consecutive.prev - if c_int - prev > 1: - is_consecutive.value = next(is_consecutive.counter) - return is_consecutive.value - - is_consecutive.prev = 0 - is_consecutive.counter = itertools.count() - is_consecutive.value = -1 - - def escape_re_range_char(c): - return "\\" + c if c in r"\^-][" else c - - def no_escape_re_range_char(c): - return c - - if not re_escape: - escape_re_range_char = no_escape_re_range_char - - ret = [] - s = "".join(sorted(set(s))) - if len(s) > 3: - for _, chars in itertools.groupby(s, key=is_consecutive): - first = last = next(chars) - last = collections.deque( - itertools.chain(iter([last]), chars), maxlen=1 - ).pop() - if first == last: - ret.append(escape_re_range_char(first)) - else: - ret.append( - "{}-{}".format( - escape_re_range_char(first), escape_re_range_char(last) - ) - ) - else: - ret = [escape_re_range_char(c) for c in s] - - return "".join(ret) - - -def _flatten(ll: list) -> list: - ret = [] - for i in ll: - if isinstance(i, list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE deleted file mode 100644 index 89de35479..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO deleted file mode 100644 index e917b921c..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/PKG-INFO +++ /dev/null @@ -1,639 +0,0 @@ -Metadata-Version: 2.1 -Name: setuptools-scm -Version: 6.3.2 -Summary: the blessed package to manage your versions by scm tags -Home-page: https://github.com/pypa/setuptools_scm/ -Author: Ronny Pfannschmidt -Author-email: opensource@ronnypfannschmidt.de -License: MIT -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Software Development :: Version Control -Classifier: Topic :: System :: Software Distribution -Classifier: Topic :: Utilities -Requires-Python: >=3.6 -Description-Content-Type: text/x-rst -License-File: LICENSE -Requires-Dist: packaging (>=20.0) -Requires-Dist: setuptools -Requires-Dist: tomli (>=1.0.0) -Provides-Extra: toml -Requires-Dist: setuptools (>=42) ; extra == 'toml' -Requires-Dist: tomli (>=1.0.0) ; extra == 'toml' - -setuptools_scm -============== - -``setuptools_scm`` handles managing your Python package versions -in SCM metadata instead of declaring them as the version argument -or in a SCM managed file. - -Additionally ``setuptools_scm`` provides setuptools with a list of files that are managed by the SCM -(i.e. it automatically adds all of the SCM-managed files to the sdist). -Unwanted files must be excluded by discarding them via ``MANIFEST.in``. - -``setuptools_scm`` support the following scm out of the box: - -* git -* mercurial - - - -.. image:: https://github.com/pypa/setuptools_scm/workflows/python%20tests+artifacts+release/badge.svg - :target: https://github.com/pypa/setuptools_scm/actions - -.. image:: https://tidelift.com/badges/package/pypi/setuptools-scm - :target: https://tidelift.com/subscription/pkg/pypi-setuptools-scm?utm_source=pypi-setuptools-scm&utm_medium=readme - - -``pyproject.toml`` usage ------------------------- - -The preferred way to configure ``setuptools_scm`` is to author -settings in a ``tool.setuptools_scm`` section of ``pyproject.toml``. - -This feature requires Setuptools 42 or later, released in Nov, 2019. -If your project needs to support build from sdist on older versions -of Setuptools, you will need to also implement the ``setup.py usage`` -for those legacy environments. - -First, ensure that ``setuptools_scm`` is present during the project's -built step by specifying it as one of the build requirements. - -.. code:: toml - - # pyproject.toml - [build-system] - requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2"] - - -That will be sufficient to require ``setuptools_scm`` for projects -that support PEP 518 (`pip `_ and -`pep517 `_). Many tools, -especially those that invoke ``setup.py`` for any reason, may -continue to rely on ``setup_requires``. 
For maximum compatibility -with those uses, consider also including a ``setup_requires`` directive -(described below in ``setup.py usage`` and ``setup.cfg``). - -To enable version inference, add this section to your pyproject.toml: - -.. code:: toml - - # pyproject.toml - [tool.setuptools_scm] - -Including this section is comparable to supplying -``use_scm_version=True`` in ``setup.py``. Additionally, -include arbitrary keyword arguments in that section -to be supplied to ``get_version()``. For example: - -.. code:: toml - - # pyproject.toml - - [tool.setuptools_scm] - write_to = "pkg/_version.py" - - -``setup.py`` usage (deprecated) -------------------------------- - -.. warning:: - - ``setup_requires`` has been deprecated in favor of ``pyproject.toml`` - -The following settings are considered legacy behavior and -superseded by the ``pyproject.toml`` usage, but for maximal -compatibility, projects may also supply the configuration in -this older form. - -To use ``setuptools_scm`` just modify your project's ``setup.py`` file -like this: - -* Add ``setuptools_scm`` to the ``setup_requires`` parameter. -* Add the ``use_scm_version`` parameter and set it to ``True``. - -For example: - -.. code:: python - - from setuptools import setup - setup( - ..., - use_scm_version=True, - setup_requires=['setuptools_scm'], - ..., - ) - -Arguments to ``get_version()`` (see below) may be passed as a dictionary to -``use_scm_version``. For example: - -.. code:: python - - from setuptools import setup - setup( - ..., - use_scm_version = { - "root": "..", - "relative_to": __file__, - "local_scheme": "node-and-timestamp" - }, - setup_requires=['setuptools_scm'], - ..., - ) - -You can confirm the version number locally via ``setup.py``: - -.. code-block:: shell - - $ python setup.py --version - -.. note:: - - If you see unusual version numbers for packages but ``python setup.py - --version`` reports the expected version number, ensure ``[egg_info]`` is - not defined in ``setup.cfg``. - - -``setup.cfg`` usage (deprecated) ------------------------------------- - -as ``setup_requires`` is deprecated in favour of ``pyproject.toml`` -usage in ``setup.cfg`` is considered deprecated, -please use ``pyproject.toml`` whenever possible. - -Programmatic usage ------------------- - -In order to use ``setuptools_scm`` from code that is one directory deeper -than the project's root, you can use: - -.. code:: python - - from setuptools_scm import get_version - version = get_version(root='..', relative_to=__file__) - -See `setup.py Usage (deprecated)`_ above for how to use this within ``setup.py``. - - -Retrieving package version at runtime -------------------------------------- - -If you have opted not to hardcode the version number inside the package, -you can retrieve it at runtime from PEP-0566_ metadata using -``importlib.metadata`` from the standard library (added in Python 3.8) -or the `importlib_metadata`_ backport: - -.. code:: python - - from importlib.metadata import version, PackageNotFoundError - - try: - __version__ = version("package-name") - except PackageNotFoundError: - # package is not installed - pass - -Alternatively, you can use ``pkg_resources`` which is included in -``setuptools`` (but has a significant runtime cost): - -.. 
code:: python - - from pkg_resources import get_distribution, DistributionNotFound - - try: - __version__ = get_distribution("package-name").version - except DistributionNotFound: - # package is not installed - pass - -However, this does place a runtime dependency on ``setuptools`` and can add up to -a few 100ms overhead for the package import time. - -.. _PEP-0566: https://www.python.org/dev/peps/pep-0566/ -.. _importlib_metadata: https://pypi.org/project/importlib-metadata/ - - -Usage from Sphinx ------------------ - -It is discouraged to use ``setuptools_scm`` from Sphinx itself, -instead use ``importlib.metadata`` after editable/real installation: - -.. code:: python - - # contents of docs/conf.py - from importlib.metadata import version - release = version('myproject') - # for example take major/minor - version = '.'.join(release.split('.')[:2]) - -The underlying reason is, that services like *Read the Docs* sometimes change -the working directory for good reasons and using the installed metadata -prevents using needless volatile data there. - -Notable Plugins ---------------- - -`setuptools_scm_git_archive `_ - Provides partial support for obtaining versions from git archives that - belong to tagged versions. The only reason for not including it in - ``setuptools_scm`` itself is Git/GitHub not supporting sufficient metadata - for untagged/followup commits, which is preventing a consistent UX. - - -Default versioning scheme -------------------------- - -In the standard configuration ``setuptools_scm`` takes a look at three things: - -1. latest tag (with a version number) -2. the distance to this tag (e.g. number of revisions since latest tag) -3. workdir state (e.g. uncommitted changes since latest tag) - -and uses roughly the following logic to render the version: - -no distance and clean: - ``{tag}`` -distance and clean: - ``{next_version}.dev{distance}+{scm letter}{revision hash}`` -no distance and not clean: - ``{tag}+dYYYYMMDD`` -distance and not clean: - ``{next_version}.dev{distance}+{scm letter}{revision hash}.dYYYYMMDD`` - -The next version is calculated by adding ``1`` to the last numeric component of -the tag. - - -For Git projects, the version relies on `git describe `_, -so you will see an additional ``g`` prepended to the ``{revision hash}``. - -Semantic Versioning (SemVer) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Due to the default behavior it's necessary to always include a -patch version (the ``3`` in ``1.2.3``), or else the automatic guessing -will increment the wrong part of the SemVer (e.g. tag ``2.0`` results in -``2.1.devX`` instead of ``2.0.1.devX``). So please make sure to tag -accordingly. - -.. note:: - - Future versions of ``setuptools_scm`` will switch to `SemVer - `_ by default hiding the the old behavior as an - configurable option. - - -Builtin mechanisms for obtaining version numbers ------------------------------------------------- - -1. the SCM itself (git/hg) -2. ``.hg_archival`` files (mercurial archives) -3. ``PKG-INFO`` - -.. note:: - - Git archives are not supported due to Git shortcomings - - -File finders hook makes most of MANIFEST.in unnecessary -------------------------------------------------------- - -``setuptools_scm`` implements a `file_finders -`_ -entry point which returns all files tracked by your SCM. This eliminates -the need for a manually constructed ``MANIFEST.in`` in most cases where this -would be required when not using ``setuptools_scm``, namely: - -* To ensure all relevant files are packaged when running the ``sdist`` command. 
- -* When using `include_package_data `_ - to include package data as part of the ``build`` or ``bdist_wheel``. - -``MANIFEST.in`` may still be used: anything defined there overrides the hook. -This is mostly useful to exclude files tracked in your SCM from packages, -although in principle it can be used to explicitly include non-tracked files -too. - - -Configuration parameters ------------------------- - -In order to configure the way ``use_scm_version`` works you can provide -a mapping with options instead of a boolean value. - -The currently supported configuration keys are: - -:root: - Relative path to cwd, used for finding the SCM root; defaults to ``.`` - -:version_scheme: - Configures how the local version number is constructed; either an - entrypoint name or a callable. - -:local_scheme: - Configures how the local component of the version is constructed; either an - entrypoint name or a callable. - -:write_to: - A path to a file that gets replaced with a file containing the current - version. It is ideal for creating a ``_version.py`` file within the - package, typically used to avoid using `pkg_resources.get_distribution` - (which adds some overhead). - - .. warning:: - - Only files with :code:`.py` and :code:`.txt` extensions have builtin - templates, for other file types it is necessary to provide - :code:`write_to_template`. - -:write_to_template: - A newstyle format string that is given the current version as - the ``version`` keyword argument for formatting. - -:relative_to: - A file from which the root can be resolved. - Typically called by a script or module that is not in the root of the - repository to point ``setuptools_scm`` at the root of the repository by - supplying ``__file__``. - -:tag_regex: - A Python regex string to extract the version part from any SCM tag. - The regex needs to contain either a single match group, or a group - named ``version``, that captures the actual version information. - - Defaults to the value of ``setuptools_scm.config.DEFAULT_TAG_REGEX`` - (see `config.py `_). - -:parentdir_prefix_version: - If the normal methods for detecting the version (SCM version, - sdist metadata) fail, and the parent directory name starts with - ``parentdir_prefix_version``, then this prefix is stripped and the rest of - the parent directory name is matched with ``tag_regex`` to get a version - string. If this parameter is unset (the default), then this fallback is - not used. - - This is intended to cover GitHub's "release tarballs", which extract into - directories named ``projectname-tag/`` (in which case - ``parentdir_prefix_version`` can be set e.g. to ``projectname-``). - -:fallback_version: - A version string that will be used if no other method for detecting the - version worked (e.g., when using a tarball with no metadata). If this is - unset (the default), setuptools_scm will error if it fails to detect the - version. - -:parse: - A function that will be used instead of the discovered SCM for parsing the - version. - Use with caution, this is a function for advanced use, and you should be - familiar with the ``setuptools_scm`` internals to use it. - -:git_describe_command: - This command will be used instead the default ``git describe`` command. - Use with caution, this is a function for advanced use, and you should be - familiar with the ``setuptools_scm`` internals to use it. - - Defaults to the value set by ``setuptools_scm.git.DEFAULT_DESCRIBE`` - (see `git.py `_). 
- -:normalize: - A boolean flag indicating if the version string should be normalized. - Defaults to ``True``. Setting this to ``False`` is equivalent to setting - ``version_cls`` to ``setuptools_scm.version.NonNormalizedVersion`` - -:version_cls: - An optional class used to parse, verify and possibly normalize the version - string. Its constructor should receive a single string argument, and its - ``str`` should return the normalized version string to use. - This option can also receive a class qualified name as a string. - - This defaults to ``packaging.version.Version`` if available. If - ``packaging`` is not installed, ``pkg_resources.packaging.version.Version`` - is used. Note that it is known to modify git release candidate schemes. - - The ``setuptools_scm.NonNormalizedVersion`` convenience class is - provided to disable the normalization step done by - ``packaging.version.Version``. If this is used while ``setuptools_scm`` - is integrated in a setuptools packaging process, the non-normalized - version number will appear in all files (see ``write_to``) BUT note - that setuptools will still normalize it to create the final distribution, - so as to stay compliant with the python packaging standards. - - -To use ``setuptools_scm`` in other Python code you can use the ``get_version`` -function: - -.. code:: python - - from setuptools_scm import get_version - my_version = get_version() - -It optionally accepts the keys of the ``use_scm_version`` parameter as -keyword arguments. - -Example configuration in ``setup.py`` format: - -.. code:: python - - from setuptools import setup - - setup( - use_scm_version={ - 'write_to': '_version.py', - 'write_to_template': '__version__ = "{version}"', - 'tag_regex': r'^(?Pv)?(?P[^\+]+)(?P.*)?$', - } - ) - -Environment variables ---------------------- - -:SETUPTOOLS_SCM_PRETEND_VERSION: - when defined and not empty, - its used as the primary source for the version number - in which case it will be a unparsed string - - -:SETUPTOOLS_SCM_PRETEND_VERSION_FOR_${UPPERCASED_DIST_NAME}: - when defined and not empty, - its used as the primary source for the version number - in which case it will be a unparsed string - - it takes precedence over ``SETUPTOOLS_SCM_PRETEND_VERSION`` - - -:SETUPTOOLS_SCM_DEBUG: - when defined and not empty, - a lot of debug information will be printed as part of ``setuptools_scm`` - operating - -:SOURCE_DATE_EPOCH: - when defined, used as the timestamp from which the - ``node-and-date`` and ``node-and-timestamp`` local parts are - derived, otherwise the current time is used - (https://reproducible-builds.org/docs/source-date-epoch/) - - -:SETUPTOOLS_SCM_IGNORE_VCS_ROOTS: - when defined, a ``os.pathsep`` separated list - of directory names to ignore for root finding - -Extending setuptools_scm ------------------------- - -``setuptools_scm`` ships with a few ``setuptools`` entrypoints based hooks to -extend its default capabilities. - -Adding a new SCM -~~~~~~~~~~~~~~~~ - -``setuptools_scm`` provides two entrypoints for adding new SCMs: - -``setuptools_scm.parse_scm`` - A function used to parse the metadata of the current workdir - using the name of the control directory/file of your SCM as the - entrypoint's name. E.g. for the built-in entrypoint for git the - entrypoint is named ``.git`` and references ``setuptools_scm.git:parse`` - - The return value MUST be a ``setuptools_scm.version.ScmVersion`` instance - created by the function ``setuptools_scm.version:meta``. 
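A hedged sketch of what a third-party plugin registered under the ``setuptools_scm.parse_scm`` entry point described above could look like; the module name ``myscm``, the ``.fossil`` control directory, and the tag/distance values are purely illustrative::

    # myscm.py -- hypothetical plugin module
    from setuptools_scm.version import meta

    def parse(root, config=None):
        # a real plugin would query the SCM for the latest tag, the
        # distance from it, and the dirty state of the working directory
        return meta(tag="0.1.0", distance=3, dirty=False, config=config)

The plugin's own packaging metadata would then declare an entry point named after the SCM control directory, e.g. ``.fossil = myscm:parse`` in the ``setuptools_scm.parse_scm`` group.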
- -``setuptools_scm.files_command`` - Either a string containing a shell command that prints all SCM managed - files in its current working directory or a callable, that given a - pathname will return that list. - - Also use then name of your SCM control directory as name of the entrypoint. - -Version number construction -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``setuptools_scm.version_scheme`` - Configures how the version number is constructed given a - ``setuptools_scm.version.ScmVersion`` instance and should return a string - representing the version. - - Available implementations: - - :guess-next-dev: Automatically guesses the next development version (default). - Guesses the upcoming release by incrementing the pre-release segment if present, - otherwise by incrementing the micro segment. Then appends :code:`.devN`. - In case the tag ends with ``.dev0`` the version is not bumped - and custom ``.devN`` versions will trigger a error. - :post-release: generates post release versions (adds :code:`.postN`) - :python-simplified-semver: Basic semantic versioning. Guesses the upcoming release - by incrementing the minor segment and setting the micro segment to zero if the - current branch contains the string ``'feature'``, otherwise by incrementing the - micro version. Then appends :code:`.devN`. Not compatible with pre-releases. - :release-branch-semver: Semantic versioning for projects with release branches. The - same as ``guess-next-dev`` (incrementing the pre-release or micro segment) if on - a release branch: a branch whose name (ignoring namespace) parses as a version - that matches the most recent tag up to the minor segment. Otherwise if on a - non-release branch, increments the minor segment and sets the micro segment to - zero, then appends :code:`.devN`. - :no-guess-dev: Does no next version guessing, just adds :code:`.post1.devN` - -``setuptools_scm.local_scheme`` - Configures how the local part of a version is rendered given a - ``setuptools_scm.version.ScmVersion`` instance and should return a string - representing the local version. - Dates and times are in Coordinated Universal Time (UTC), because as part - of the version, they should be location independent. - - Available implementations: - - :node-and-date: adds the node on dev versions and the date on dirty - workdir (default) - :node-and-timestamp: like ``node-and-date`` but with a timestamp of - the form ``{:%Y%m%d%H%M%S}`` instead - :dirty-tag: adds ``+dirty`` if the current workdir has changes - :no-local-version: omits local version, useful e.g. because pypi does - not support it - - -Importing in ``setup.py`` -~~~~~~~~~~~~~~~~~~~~~~~~~ - -To support usage in ``setup.py`` passing a callable into ``use_scm_version`` -is supported. - -Within that callable, ``setuptools_scm`` is available for import. -The callable must return the configuration. - - -.. code:: python - - # content of setup.py - import setuptools - - def myversion(): - from setuptools_scm.version import get_local_dirty_tag - def clean_scheme(version): - return get_local_dirty_tag(version) if version.dirty else '+clean' - - return {'local_scheme': clean_scheme} - - setup( - ..., - use_scm_version=myversion, - ... - ) - - -Note on testing non-installed versions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -While the general advice is to test against a installed version, -some environments require a test prior to install, - -.. 
code:: - - $ python setup.py egg_info - $ PYTHONPATH=$PWD:$PWD/src pytest - - -Interaction with Enterprise Distributions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Some enterprise distributions like RHEL7 and others -ship rather old setuptools versions due to various release management details. - -In those case its typically possible to build by using a sdist against ``setuptools_scm<2.0``. -As those old setuptools versions lack sensible types for versions, -modern setuptools_scm is unable to support them sensibly. - -In case the project you need to build can not be patched to either use old setuptools_scm, -its still possible to install a more recent version of setuptools in order to handle the build -and/or install the package by using wheels or eggs. - - - -Code of Conduct ---------------- - -Everyone interacting in the ``setuptools_scm`` project's codebases, issue -trackers, chat rooms, and mailing lists is expected to follow the -`PSF Code of Conduct`_. - -.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md - -Security Contact -================ - -To report a security vulnerability, please use the -`Tidelift security contact `_. -Tidelift will coordinate the fix and disclosure. - - diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD deleted file mode 100644 index d148bc7f0..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/RECORD +++ /dev/null @@ -1,23 +0,0 @@ -setuptools_scm/__init__.py,sha256=zWUSg4yZvzIQ6F5s6kLhYoqLyeP70TaW2ZWTeu0Upqg,6274 -setuptools_scm/__main__.py,sha256=N1ovM8yVFiMh55m0JxeKpyTxky9RX_jsMNY8Bd6TUsw,295 -setuptools_scm/_version_cls.py,sha256=OqpnIzcegf4mPaiMy6QLHflwoxiApI8Qo0zkZe2fom8,1428 -setuptools_scm/config.py,sha256=jqMkPKLhagFfDkZc7WxcNBZoq9R3FTFmW9VDTivKYqc,6733 -setuptools_scm/discover.py,sha256=rncA7Go947oa3PRoMA1306GRXxd4Q23DRFvdbXBQT34,1557 -setuptools_scm/file_finder.py,sha256=JHCd6G6m3Df4iwUgszgylKfYNDEJWM2v8v6RuldfiY8,2554 -setuptools_scm/file_finder_git.py,sha256=fyTvB3qkYvrIv_K6j0V7Pxo3--MmW6qHRnoN4y50XjM,3244 -setuptools_scm/file_finder_hg.py,sha256=v03QeJOnHJsIYR-JCy-4ntTthg0zzDGhvmFMMKGz3l0,1492 -setuptools_scm/git.py,sha256=PrtlB917DAKSrahkgzx0TiYZq1QN1MkiHmTO0g4wskU,6204 -setuptools_scm/hacks.py,sha256=Y1tBCq3PsrovBSxgQBouEHW_iW7N_E5ADN7w0OiHrr8,1316 -setuptools_scm/hg.py,sha256=XZQQfIsQC8o6r_S-vaaT4U-TSkOQkYyxEbk5dGGQhkQ,5074 -setuptools_scm/hg_git.py,sha256=QnFvA2WcykTaa3p8sE33_IIPo-RJQ-sr9xYEvYAJHsk,3499 -setuptools_scm/integration.py,sha256=WxSJcsa4oE76wFZoJU3fJEpLFFKVWjRe8a1c-y7tlWM,2732 -setuptools_scm/scm_workdir.py,sha256=k0w7Cct1cHTnFM3FWY0gNg1i8RKbDUNJf6YjABHnKBk,322 -setuptools_scm/utils.py,sha256=32_SlZLAXc9KqUEOBExvXhU2fDyDk_A0DvmQ-4AMr2s,3963 -setuptools_scm/version.py,sha256=NqB0rGfmEgVkUc-m7nPyMuUb7V7fBVMqtmYGkVBcXWs,13818 -setuptools_scm-6.3.2.dist-info/LICENSE,sha256=iYB6zyMJvShfAzQE7nhYFgLzzZuBmhasLw5fYP9KRz4,1023 -setuptools_scm-6.3.2.dist-info/METADATA,sha256=tZATPwhkHQ_8bRhfJaCI2zbgSd3TiMdnwCD_JODppKs,22252 -setuptools_scm-6.3.2.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 -setuptools_scm-6.3.2.dist-info/entry_points.txt,sha256=LzLFBv9B2emlz6AljQ8nhREGOeg8BUZAZq0am6uY9j4,1440 -setuptools_scm-6.3.2.dist-info/top_level.txt,sha256=kiu-91q3_rJLUoc2wl8_lC4cIlpgtgdD_4NaChF4hOA,15 -setuptools_scm-6.3.2.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 -setuptools_scm-6.3.2.dist-info/RECORD,, diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL 
b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL deleted file mode 100644 index 5bad85fdc..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.0) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt deleted file mode 100644 index 88df81355..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/entry_points.txt +++ /dev/null @@ -1,37 +0,0 @@ -[distutils.setup_keywords] -use_scm_version = setuptools_scm.integration:version_keyword - -[setuptools.file_finders] -setuptools_scm = setuptools_scm.integration:find_files - -[setuptools.finalize_distribution_options] -setuptools_scm = setuptools_scm.integration:infer_version - -[setuptools_scm.files_command] -.git = setuptools_scm.file_finder_git:git_find_files -.hg = setuptools_scm.file_finder_hg:hg_find_files - -[setuptools_scm.local_scheme] -dirty-tag = setuptools_scm.version:get_local_dirty_tag -no-local-version = setuptools_scm.version:get_no_local_node -node-and-date = setuptools_scm.version:get_local_node_and_date -node-and-timestamp = setuptools_scm.version:get_local_node_and_timestamp - -[setuptools_scm.parse_scm] -.git = setuptools_scm.git:parse -.hg = setuptools_scm.hg:parse - -[setuptools_scm.parse_scm_fallback] -.hg_archival.txt = setuptools_scm.hg:parse_archival -PKG-INFO = setuptools_scm.hacks:parse_pkginfo -pip-egg-info = setuptools_scm.hacks:parse_pip_egg_info -setup.py = setuptools_scm.hacks:fallback_version - -[setuptools_scm.version_scheme] -calver-by-date = setuptools_scm.version:calver_by_date -guess-next-dev = setuptools_scm.version:guess_next_dev_version -no-guess-dev = setuptools_scm.version:no_guess_dev_version -post-release = setuptools_scm.version:postrelease_version -python-simplified-semver = setuptools_scm.version:simplified_semver_version -release-branch-semver = setuptools_scm.version:release_branch_semver_version - diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt deleted file mode 100644 index bd01a6c23..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/requires.txt +++ /dev/null @@ -1,6 +0,0 @@ -packaging>=20.0 -setuptools -tomli>=1.0.0 - -[toml] -setuptools>=42 diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt deleted file mode 100644 index cba8d8860..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -setuptools_scm diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe b/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe deleted file mode 100644 index 8b1378917..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/EGG-INFO/zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py deleted file mode 100644 index b4f86eae3..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__init__.py +++ /dev/null @@ -1,212 +0,0 @@ -""" -:copyright: 2010-2015 by Ronny Pfannschmidt -:license: MIT -""" -import os -import warnings - -from ._version_cls import NonNormalizedVersion -from ._version_cls import Version -from .config import Configuration -from .config import DEFAULT_LOCAL_SCHEME -from .config import 
DEFAULT_TAG_REGEX -from .config import DEFAULT_VERSION_SCHEME -from .discover import iter_matching_entrypoints -from .utils import function_has_arg -from .utils import trace -from .version import format_version -from .version import meta - -PRETEND_KEY = "SETUPTOOLS_SCM_PRETEND_VERSION" -PRETEND_KEY_NAMED = PRETEND_KEY + "_FOR_{name}" - -TEMPLATES = { - ".py": """\ -# coding: utf-8 -# file generated by setuptools_scm -# don't change, don't track in version control -version = {version!r} -version_tuple = {version_tuple!r} -""", - ".txt": "{version}", -} - - -def version_from_scm(root): - warnings.warn( - "version_from_scm is deprecated please use get_version", - category=DeprecationWarning, - stacklevel=2, - ) - config = Configuration(root=root) - # TODO: Is it API? - return _version_from_entrypoints(config) - - -def _call_entrypoint_fn(root, config, fn): - if function_has_arg(fn, "config"): - return fn(root, config=config) - else: - warnings.warn( - f"parse function {fn.__module__}.{fn.__name__}" - " are required to provide a named argument" - " 'config', setuptools_scm>=8.0 will remove support.", - category=DeprecationWarning, - stacklevel=2, - ) - return fn(root) - - -def _version_from_entrypoints(config: Configuration, fallback=False): - if fallback: - entrypoint = "setuptools_scm.parse_scm_fallback" - root = config.fallback_root - else: - entrypoint = "setuptools_scm.parse_scm" - root = config.absolute_root - - for ep in iter_matching_entrypoints(root, entrypoint, config): - version = _call_entrypoint_fn(root, config, ep.load()) - trace(ep, version) - if version: - return version - - -def dump_version(root, version, write_to, template=None): - assert isinstance(version, str) - if not write_to: - return - target = os.path.normpath(os.path.join(root, write_to)) - ext = os.path.splitext(target)[1] - template = template or TEMPLATES.get(ext) - - if template is None: - raise ValueError( - "bad file format: '{}' (of {}) \nonly *.txt and *.py are supported".format( - os.path.splitext(target)[1], target - ) - ) - - parsed_version = Version(version) - version_fields = parsed_version.release - if parsed_version.dev is not None: - version_fields += (f"dev{parsed_version.dev}",) - if parsed_version.local is not None: - version_fields += (parsed_version.local,) - - with open(target, "w") as fp: - fp.write(template.format(version=version, version_tuple=tuple(version_fields))) - - -def _do_parse(config): - - trace("dist name:", config.dist_name) - if config.dist_name is not None: - pretended = os.environ.get( - PRETEND_KEY_NAMED.format(name=config.dist_name.upper()) - ) - else: - pretended = None - - if pretended is None: - pretended = os.environ.get(PRETEND_KEY) - - if pretended: - # we use meta here since the pretended version - # must adhere to the pep to begin with - return meta(tag=pretended, preformatted=True, config=config) - - if config.parse: - parse_result = _call_entrypoint_fn(config.absolute_root, config, config.parse) - if isinstance(parse_result, str): - raise TypeError( - "version parse result was a string\nplease return a parsed version" - ) - version = parse_result or _version_from_entrypoints(config, fallback=True) - else: - # include fallbacks after dropping them from the main entrypoint - version = _version_from_entrypoints(config) or _version_from_entrypoints( - config, fallback=True - ) - - if version: - return version - - raise LookupError( - "setuptools-scm was unable to detect version for %r.\n\n" - "Make sure you're either building from a fully intact git repository " - "or 
PyPI tarballs. Most other sources (such as GitHub's tarballs, a " - "git checkout without the .git folder) don't contain the necessary " - "metadata and will not work.\n\n" - "For example, if you're using pip, instead of " - "https://github.com/user/proj/archive/master.zip " - "use git+https://github.com/user/proj.git#egg=proj" % config.absolute_root - ) - - -def get_version( - root=".", - version_scheme=DEFAULT_VERSION_SCHEME, - local_scheme=DEFAULT_LOCAL_SCHEME, - write_to=None, - write_to_template=None, - relative_to=None, - tag_regex=DEFAULT_TAG_REGEX, - parentdir_prefix_version=None, - fallback_version=None, - fallback_root=".", - parse=None, - git_describe_command=None, - dist_name=None, - version_cls=None, - normalize=True, - search_parent_directories=False, -): - """ - If supplied, relative_to should be a file from which root may - be resolved. Typically called by a script or module that is not - in the root of the repository to direct setuptools_scm to the - root of the repository by supplying ``__file__``. - """ - - config = Configuration(**locals()) - return _get_version(config) - - -def _get_version(config): - parsed_version = _do_parse(config) - - if parsed_version: - version_string = format_version( - parsed_version, - version_scheme=config.version_scheme, - local_scheme=config.local_scheme, - ) - dump_version( - root=config.root, - version=version_string, - write_to=config.write_to, - template=config.write_to_template, - ) - - return version_string - - -# Public API -__all__ = [ - "get_version", - "dump_version", - "version_from_scm", - "Configuration", - "DEFAULT_VERSION_SCHEME", - "DEFAULT_LOCAL_SCHEME", - "DEFAULT_TAG_REGEX", - "Version", - "NonNormalizedVersion", - # TODO: are the symbols below part of public API ? - "function_has_arg", - "trace", - "format_version", - "meta", - "iter_matching_entrypoints", -] diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py deleted file mode 100644 index f3377b055..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/__main__.py +++ /dev/null @@ -1,15 +0,0 @@ -import sys - -from setuptools_scm import get_version -from setuptools_scm.integration import find_files - - -def main() -> None: - print("Guessed Version", get_version()) - if "ls" in sys.argv: - for fname in find_files("."): - print(fname) - - -if __name__ == "__main__": - main() diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py deleted file mode 100644 index 0cefb2679..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/_version_cls.py +++ /dev/null @@ -1,49 +0,0 @@ -try: - from packaging.version import Version - - assert hasattr(Version, "release") -except ImportError: - from pkg_resources._vendor.packaging.version import Version as SetuptoolsVersion - - try: - SetuptoolsVersion.release - Version = SetuptoolsVersion - except AttributeError: - - class Version(SetuptoolsVersion): # type: ignore - @property - def release(self): - return self._version.release - - @property - def dev(self): - return self._version.dev - - @property - def local(self): - return self._version.local - - -class NonNormalizedVersion(Version): - """A non-normalizing version handler. - - You can use this class to preserve version verification but skip normalization. 
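Tying the pieces above together, a minimal sketch of ``get_version`` with the pretend-version override (the version string is illustrative)::

    import os
    from setuptools_scm import get_version

    # the environment variable short-circuits all SCM detection;
    # the pretended tag is returned preformatted, as-is
    os.environ["SETUPTOOLS_SCM_PRETEND_VERSION"] = "1.2.3"
    assert get_version() == "1.2.3"

    # outside of pretend mode, normalize=False is shorthand for
    # version_cls=setuptools_scm.NonNormalizedVersion:
    # get_version(normalize=False)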
- For example you can use this to avoid git release candidate version tags - ("1.0.0-rc1") to be normalized to "1.0.0rc1". Only use this if you fully - trust the version tags. - """ - - def __init__(self, version): - # parse and validate using parent - super().__init__(version) - - # store raw for str - self._raw_version = version - - def __str__(self): - # return the non-normalized version (parent returns the normalized) - return self._raw_version - - def __repr__(self): - # same pattern as parent - return f"" diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py deleted file mode 100644 index 6bcf446f3..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/config.py +++ /dev/null @@ -1,212 +0,0 @@ -""" configuration """ -import os -import re -import warnings - -from ._version_cls import NonNormalizedVersion -from ._version_cls import Version -from .utils import trace - -DEFAULT_TAG_REGEX = r"^(?:[\w-]+-)?(?P[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$" -DEFAULT_VERSION_SCHEME = "guess-next-dev" -DEFAULT_LOCAL_SCHEME = "node-and-date" - - -def _check_tag_regex(value): - if not value: - value = DEFAULT_TAG_REGEX - regex = re.compile(value) - - group_names = regex.groupindex.keys() - if regex.groups == 0 or (regex.groups > 1 and "version" not in group_names): - warnings.warn( - "Expected tag_regex to contain a single match group or a group named" - " 'version' to identify the version part of any tag." - ) - - return regex - - -def _check_absolute_root(root, relative_to): - trace("l", repr(locals())) - if relative_to: - if os.path.isabs(root) and not root.startswith(relative_to): - warnings.warn( - "absolute root path '%s' overrides relative_to '%s'" - % (root, relative_to) - ) - if os.path.isdir(relative_to): - warnings.warn( - "relative_to is expected to be a file," - " its the directory %r\n" - "assuming the parent directory was passed" % (relative_to,) - ) - trace("dir", relative_to) - root = os.path.join(relative_to, root) - else: - trace("file", relative_to) - root = os.path.join(os.path.dirname(relative_to), root) - return os.path.abspath(root) - - -def _lazy_tomli_load(data): - from tomli import loads - - return loads(data) - - -class Configuration: - """Global configuration model""" - - def __init__( - self, - relative_to=None, - root=".", - version_scheme=DEFAULT_VERSION_SCHEME, - local_scheme=DEFAULT_LOCAL_SCHEME, - write_to=None, - write_to_template=None, - tag_regex=DEFAULT_TAG_REGEX, - parentdir_prefix_version=None, - fallback_version=None, - fallback_root=".", - parse=None, - git_describe_command=None, - dist_name=None, - version_cls=None, - normalize=True, - search_parent_directories=False, - ): - # TODO: - self._relative_to = relative_to - self._root = "." 
- - self.root = root - self.version_scheme = version_scheme - self.local_scheme = local_scheme - self.write_to = write_to - self.write_to_template = write_to_template - self.parentdir_prefix_version = parentdir_prefix_version - self.fallback_version = fallback_version - self.fallback_root = fallback_root - self.parse = parse - self.tag_regex = tag_regex - self.git_describe_command = git_describe_command - self.dist_name = dist_name - self.search_parent_directories = search_parent_directories - self.parent = None - - if not normalize: - # `normalize = False` means `version_cls = NonNormalizedVersion` - if version_cls is not None: - raise ValueError( - "Providing a custom `version_cls` is not permitted when " - "`normalize=False`" - ) - self.version_cls = NonNormalizedVersion - else: - # Use `version_cls` if provided, default to packaging or pkg_resources - if version_cls is None: - version_cls = Version - elif isinstance(version_cls, str): - try: - # Not sure this will work in old python - import importlib - - pkg, cls_name = version_cls.rsplit(".", 1) - version_cls_host = importlib.import_module(pkg) - version_cls = getattr(version_cls_host, cls_name) - except: # noqa - raise ValueError(f"Unable to import version_cls='{version_cls}'") - self.version_cls = version_cls - - @property - def fallback_root(self): - return self._fallback_root - - @fallback_root.setter - def fallback_root(self, value): - self._fallback_root = os.path.abspath(value) - - @property - def absolute_root(self): - return self._absolute_root - - @property - def relative_to(self): - return self._relative_to - - @relative_to.setter - def relative_to(self, value): - self._absolute_root = _check_absolute_root(self._root, value) - self._relative_to = value - trace("root", repr(self._absolute_root)) - trace("relative_to", repr(value)) - - @property - def root(self): - return self._root - - @root.setter - def root(self, value): - self._absolute_root = _check_absolute_root(value, self._relative_to) - self._root = value - trace("root", repr(self._absolute_root)) - trace("relative_to", repr(self._relative_to)) - - @property - def tag_regex(self): - return self._tag_regex - - @tag_regex.setter - def tag_regex(self, value): - self._tag_regex = _check_tag_regex(value) - - @classmethod - def from_file( - cls, - name: str = "pyproject.toml", - dist_name=None, # type: str | None - _load_toml=_lazy_tomli_load, - ): - """ - Read Configuration from pyproject.toml (or similar). - Raises exceptions when file is not found or toml is - not installed or the file has invalid format or does - not contain the [tool.setuptools_scm] section. 
- """ - - with open(name, encoding="UTF-8") as strm: - data = strm.read() - defn = _load_toml(data) - try: - section = defn.get("tool", {})["setuptools_scm"] - except LookupError as e: - raise LookupError( - f"{name} does not contain a tool.setuptools_scm section" - ) from e - if "dist_name" in section: - if dist_name is None: - dist_name = section.pop("dist_name") - else: - assert dist_name == section["dist_name"] - del section["dist_name"] - if dist_name is None: - if "project" in defn: - # minimal pep 621 support for figuring the pretend keys - dist_name = defn["project"].get("name") - if dist_name is None: - dist_name = _read_dist_name_from_setup_cfg() - - return cls(dist_name=dist_name, **section) - - -def _read_dist_name_from_setup_cfg(): - - # minimal effort to read dist_name off setup.cfg metadata - import configparser - - parser = configparser.ConfigParser() - parser.read(["setup.cfg"]) - dist_name = parser.get("metadata", "name", fallback=None) - return dist_name diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py deleted file mode 100644 index f2aee17a8..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/discover.py +++ /dev/null @@ -1,58 +0,0 @@ -import os - -from .config import Configuration -from .utils import iter_entry_points -from .utils import trace - - -def walk_potential_roots(root, search_parents=True): - """ - Iterate though a path and each of its parents. - :param root: File path. - :param search_parents: If ``False`` the parents are not considered. - """ - - if not search_parents: - yield root - return - - tail = root - - while tail: - yield root - root, tail = os.path.split(root) - - -def match_entrypoint(root, name): - """ - Consider a ``root`` as entry-point. - :param root: File path. - :param name: Subdirectory name. - :return: ``True`` if a subdirectory ``name`` exits in ``root``. - """ - - if os.path.exists(os.path.join(root, name)): - if not os.path.isabs(name): - return True - trace("ignoring bad ep", name) - - return False - - -def iter_matching_entrypoints(root, entrypoint, config: Configuration): - """ - Consider different entry-points in ``root`` and optionally its parents. - :param root: File path. - :param entrypoint: Entry-point to consider. - :param config: Configuration, - read ``search_parent_directories``, write found parent to ``parent``. 
- """ - - trace("looking for ep", entrypoint, root) - - for wd in walk_potential_roots(root, config.search_parent_directories): - for ep in iter_entry_points(entrypoint): - if match_entrypoint(wd, ep.name): - trace("found ep", ep, "in", wd) - config.parent = wd - yield ep diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py deleted file mode 100644 index 466602482..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder.py +++ /dev/null @@ -1,70 +0,0 @@ -import os - -from .utils import trace - - -def scm_find_files(path, scm_files, scm_dirs): - """ setuptools compatible file finder that follows symlinks - - - path: the root directory from which to search - - scm_files: set of scm controlled files and symlinks - (including symlinks to directories) - - scm_dirs: set of scm controlled directories - (including directories containing no scm controlled files) - - scm_files and scm_dirs must be absolute with symlinks resolved (realpath), - with normalized case (normcase) - - Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\ - adding-support-for-revision-control-systems - """ - realpath = os.path.normcase(os.path.realpath(path)) - seen = set() - res = [] - for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True): - # dirpath with symlinks resolved - realdirpath = os.path.normcase(os.path.realpath(dirpath)) - - def _link_not_in_scm(n): - fn = os.path.join(realdirpath, os.path.normcase(n)) - return os.path.islink(fn) and fn not in scm_files - - if realdirpath not in scm_dirs: - # directory not in scm, don't walk it's content - dirnames[:] = [] - continue - if os.path.islink(dirpath) and not os.path.relpath( - realdirpath, realpath - ).startswith(os.pardir): - # a symlink to a directory not outside path: - # we keep it in the result and don't walk its content - res.append(os.path.join(path, os.path.relpath(dirpath, path))) - dirnames[:] = [] - continue - if realdirpath in seen: - # symlink loop protection - dirnames[:] = [] - continue - dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)] - for filename in filenames: - if _link_not_in_scm(filename): - continue - # dirpath + filename with symlinks preserved - fullfilename = os.path.join(dirpath, filename) - if os.path.normcase(os.path.realpath(fullfilename)) in scm_files: - res.append(os.path.join(path, os.path.relpath(fullfilename, realpath))) - seen.add(realdirpath) - return res - - -def is_toplevel_acceptable(toplevel): - """ """ - if toplevel is None: - return False - - ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep) - ignored = [os.path.normcase(p) for p in ignored] - - trace(toplevel, ignored) - - return toplevel not in ignored diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py deleted file mode 100644 index c6f96d8ac..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_git.py +++ /dev/null @@ -1,93 +0,0 @@ -import logging -import os -import subprocess -import tarfile - -from .file_finder import is_toplevel_acceptable -from .file_finder import scm_find_files -from .utils import do_ex -from .utils import trace - -log = logging.getLogger(__name__) - - -def _git_toplevel(path): - try: - cwd = os.path.abspath(path or ".") - out, err, ret = do_ex(["git", "rev-parse", "HEAD"], cwd=cwd) - if ret != 0: - # BAIL if there 
is no commit - log.error("listing git files failed - pretending there aren't any") - return None - out, err, ret = do_ex( - ["git", "rev-parse", "--show-prefix"], - cwd=cwd, - ) - if ret != 0: - return None - out = out.strip()[:-1] # remove the trailing pathsep - if not out: - out = cwd - else: - # Here, ``out`` is a relative path to root of git. - # ``cwd`` is absolute path to current working directory. - # the below method removes the length of ``out`` from - # ``cwd``, which gives the git toplevel - assert cwd.replace("\\", "/").endswith(out), f"cwd={cwd!r}\nout={out!r}" - # In windows cwd contains ``\`` which should be replaced by ``/`` - # for this assertion to work. Length of string isn't changed by replace - # ``\\`` is just and escape for `\` - out = cwd[: -len(out)] - trace("find files toplevel", out) - return os.path.normcase(os.path.realpath(out.strip())) - except subprocess.CalledProcessError: - # git returned error, we are not in a git repo - return None - except OSError: - # git command not found, probably - return None - - -def _git_interpret_archive(fd, toplevel): - with tarfile.open(fileobj=fd, mode="r|*") as tf: - git_files = set() - git_dirs = {toplevel} - for member in tf.getmembers(): - name = os.path.normcase(member.name).replace("/", os.path.sep) - if member.type == tarfile.DIRTYPE: - git_dirs.add(name) - else: - git_files.add(name) - return git_files, git_dirs - - -def _git_ls_files_and_dirs(toplevel): - # use git archive instead of git ls-file to honor - # export-ignore git attribute - - cmd = ["git", "archive", "--prefix", toplevel + os.path.sep, "HEAD"] - proc = subprocess.Popen( - cmd, stdout=subprocess.PIPE, cwd=toplevel, stderr=subprocess.DEVNULL - ) - try: - try: - return _git_interpret_archive(proc.stdout, toplevel) - finally: - # ensure we avoid resource warnings by cleaning up the process - proc.stdout.close() - proc.terminate() - except Exception: - if proc.wait() != 0: - log.error("listing git files failed - pretending there aren't any") - return (), () - - -def git_find_files(path=""): - toplevel = _git_toplevel(path) - if not is_toplevel_acceptable(toplevel): - return [] - fullpath = os.path.abspath(os.path.normpath(path)) - if not fullpath.startswith(toplevel): - trace("toplevel mismatch", toplevel, fullpath) - git_files, git_dirs = _git_ls_files_and_dirs(toplevel) - return scm_find_files(path, git_files, git_dirs) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py deleted file mode 100644 index 53878c6a3..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/file_finder_hg.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import subprocess - -from .file_finder import is_toplevel_acceptable -from .file_finder import scm_find_files -from .utils import do_ex - - -def _hg_toplevel(path): - try: - with open(os.devnull, "wb") as devnull: - out = subprocess.check_output( - ["hg", "root"], - cwd=(path or "."), - universal_newlines=True, - stderr=devnull, - ) - return os.path.normcase(os.path.realpath(out.strip())) - except subprocess.CalledProcessError: - # hg returned error, we are not in a mercurial repo - return None - except OSError: - # hg command not found, probably - return None - - -def _hg_ls_files_and_dirs(toplevel): - hg_files = set() - hg_dirs = {toplevel} - out, err, ret = do_ex(["hg", "files"], cwd=toplevel) - if ret: - (), () - for name in out.splitlines(): - name = os.path.normcase(name).replace("/", os.path.sep) - fullname = 
os.path.join(toplevel, name) - hg_files.add(fullname) - dirname = os.path.dirname(fullname) - while len(dirname) > len(toplevel) and dirname not in hg_dirs: - hg_dirs.add(dirname) - dirname = os.path.dirname(dirname) - return hg_files, hg_dirs - - -def hg_find_files(path=""): - toplevel = _hg_toplevel(path) - if not is_toplevel_acceptable(toplevel): - return [] - hg_files, hg_dirs = _hg_ls_files_and_dirs(toplevel) - return scm_find_files(path, hg_files, hg_dirs) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py deleted file mode 100644 index 22e870c41..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/git.py +++ /dev/null @@ -1,220 +0,0 @@ -import os -import warnings -from datetime import date -from datetime import datetime -from os.path import isfile -from os.path import join -from os.path import samefile - -from .config import Configuration -from .scm_workdir import Workdir -from .utils import do_ex -from .utils import require_command -from .utils import trace -from .version import meta - -DEFAULT_DESCRIBE = "git describe --dirty --tags --long --match *[0-9]*" - - -class GitWorkdir(Workdir): - """experimental, may change at any time""" - - COMMAND = "git" - - @classmethod - def from_potential_worktree(cls, wd): - require_command(cls.COMMAND) - wd = os.path.abspath(wd) - real_wd, _, ret = do_ex("git rev-parse --show-prefix", wd) - real_wd = real_wd[:-1] # remove the trailing pathsep - if ret: - return - if not real_wd: - real_wd = wd - else: - assert wd.replace("\\", "/").endswith(real_wd) - # In windows wd contains ``\`` which should be replaced by ``/`` - # for this assertion to work. Length of string isn't changed by replace - # ``\\`` is just and escape for `\` - real_wd = wd[: -len(real_wd)] - trace("real root", real_wd) - if not samefile(real_wd, wd): - return - - return cls(real_wd) - - def is_dirty(self): - out, _, _ = self.do_ex("git status --porcelain --untracked-files=no") - return bool(out) - - def get_branch(self): - branch, err, ret = self.do_ex("git rev-parse --abbrev-ref HEAD") - if ret: - trace("branch err", branch, err, ret) - branch, err, ret = self.do_ex("git symbolic-ref --short HEAD") - if ret: - trace("branch err (symbolic-ref)", branch, err, ret) - branch = None - return branch - - def get_head_date(self): - timestamp, err, ret = self.do_ex("git log -n 1 HEAD --format=%cI") - if ret: - trace("timestamp err", timestamp, err, ret) - return - # TODO, when dropping python3.6 use fromiso - date_part = timestamp.split("T")[0] - if "%c" in date_part: - trace("git too old -> timestamp is ", timestamp) - return None - return datetime.strptime(date_part, r"%Y-%m-%d").date() - - def is_shallow(self): - return isfile(join(self.path, ".git/shallow")) - - def fetch_shallow(self): - self.do_ex("git fetch --unshallow") - - def node(self): - node, _, ret = self.do_ex("git rev-parse --verify --quiet HEAD") - if not ret: - return node[:7] - - def count_all_nodes(self): - revs, _, _ = self.do_ex("git rev-list HEAD") - return revs.count("\n") + 1 - - def default_describe(self): - return self.do_ex(DEFAULT_DESCRIBE) - - -def warn_on_shallow(wd): - """experimental, may change at any time""" - if wd.is_shallow(): - warnings.warn(f'"{wd.path}" is shallow and may cause errors') - - -def fetch_on_shallow(wd): - """experimental, may change at any time""" - if wd.is_shallow(): - warnings.warn(f'"{wd.path}" was shallow, git fetch was used to rectify') - wd.fetch_shallow() - - -def 
fail_on_shallow(wd): - """experimental, may change at any time""" - if wd.is_shallow(): - raise ValueError( - f'{wd.path} is shallow, please correct with "git fetch --unshallow"' - ) - - -def get_working_directory(config): - """ - Return the working directory (``GitWorkdir``). - """ - - if config.parent: - return GitWorkdir.from_potential_worktree(config.parent) - - if config.search_parent_directories: - return search_parent(config.absolute_root) - - return GitWorkdir.from_potential_worktree(config.absolute_root) - - -def parse(root, describe_command=None, pre_parse=warn_on_shallow, config=None): - """ - :param pre_parse: experimental pre_parse action, may change at any time - """ - if not config: - config = Configuration(root=root) - - wd = get_working_directory(config) - if wd: - return _git_parse_inner( - config, wd, describe_command=describe_command, pre_parse=pre_parse - ) - - -def _git_parse_inner(config, wd, pre_parse=None, describe_command=None): - if pre_parse: - pre_parse(wd) - - if config.git_describe_command is not None: - describe_command = config.git_describe_command - - if describe_command is not None: - out, _, ret = wd.do_ex(describe_command) - else: - out, _, ret = wd.default_describe() - - if ret == 0: - tag, distance, node, dirty = _git_parse_describe(out) - if distance == 0 and not dirty: - distance = None - else: - # If 'git git_describe_command' failed, try to get the information otherwise. - tag = "0.0" - node = wd.node() - if node is None: - distance = 0 - else: - distance = wd.count_all_nodes() - node = "g" + node - dirty = wd.is_dirty() - - branch = wd.get_branch() - node_date = wd.get_head_date() or date.today() - - return meta( - tag, - branch=branch, - node=node, - node_date=node_date, - distance=distance, - dirty=dirty, - config=config, - ) - - -def _git_parse_describe(describe_output): - # 'describe_output' looks e.g. like 'v1.5.0-0-g4060507' or - # 'v1.15.1rc1-37-g9bd1298-dirty'. - - if describe_output.endswith("-dirty"): - dirty = True - describe_output = describe_output[:-6] - else: - dirty = False - - tag, number, node = describe_output.rsplit("-", 2) - number = int(number) - return tag, number, node, dirty - - -def search_parent(dirname): - """ - Walk up the path to find the `.git` directory. - :param dirname: Directory from which to start searching. 
- """ - - # Code based on: - # https://github.com/gitpython-developers/GitPython/blob/main/git/repo/base.py - - curpath = os.path.abspath(dirname) - - while curpath: - - try: - wd = GitWorkdir.from_potential_worktree(curpath) - except Exception: - wd = None - - if wd is not None: - return wd - - curpath, tail = os.path.split(curpath) - - if not tail: - return None diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py deleted file mode 100644 index 849f21ffe..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hacks.py +++ /dev/null @@ -1,40 +0,0 @@ -import os - -from .utils import data_from_mime -from .utils import trace -from .version import meta -from .version import tag_to_version - - -def parse_pkginfo(root, config=None): - - pkginfo = os.path.join(root, "PKG-INFO") - trace("pkginfo", pkginfo) - data = data_from_mime(pkginfo) - version = data.get("Version") - if version != "UNKNOWN": - return meta(version, preformatted=True, config=config) - - -def parse_pip_egg_info(root, config=None): - pipdir = os.path.join(root, "pip-egg-info") - if not os.path.isdir(pipdir): - return - items = os.listdir(pipdir) - trace("pip-egg-info", pipdir, items) - if not items: - return - return parse_pkginfo(os.path.join(pipdir, items[0]), config=config) - - -def fallback_version(root, config=None): - if config.parentdir_prefix_version is not None: - _, parent_name = os.path.split(os.path.abspath(root)) - if parent_name.startswith(config.parentdir_prefix_version): - version = tag_to_version( - parent_name[len(config.parentdir_prefix_version) :], config - ) - if version is not None: - return meta(str(version), preformatted=True, config=config) - if config.fallback_version is not None: - return meta(config.fallback_version, preformatted=True, config=config) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py deleted file mode 100644 index 8166a9072..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg.py +++ /dev/null @@ -1,169 +0,0 @@ -import os -from pathlib import Path - -from .config import Configuration -from .scm_workdir import Workdir -from .utils import data_from_mime -from .utils import do_ex -from .utils import require_command -from .utils import trace -from .version import meta -from .version import tag_to_version - - -class HgWorkdir(Workdir): - - COMMAND = "hg" - - @classmethod - def from_potential_worktree(cls, wd): - require_command(cls.COMMAND) - root, err, ret = do_ex("hg root", wd) - if ret: - return - return cls(root) - - def get_meta(self, config): - - node, tags, bookmark, node_date = self.hg_log( - ".", "{node}\n{tag}\n{bookmark}\n{date|shortdate}" - ).split("\n") - - # TODO: support bookmarks and topics (but nowadays bookmarks are - # mainly used to emulate Git branches, which is already supported with - # the dedicated class GitWorkdirHgClient) - - branch, dirty, dirty_date = self.do( - ["hg", "id", "-T", "{branch}\n{if(dirty, 1, 0)}\n{date|shortdate}"] - ).split("\n") - dirty = bool(int(dirty)) - - if dirty: - date = dirty_date - else: - date = node_date - - if all(c == "0" for c in node): - trace("initial node", self.path) - return meta("0.0", config=config, dirty=dirty, branch=branch) - - node = "h" + node[:7] - - tags = tags.split() - if "tip" in tags: - # tip is not a real tag - tags = tags.remove("tip") - - if tags: - tag = tags[0] - tag = tag_to_version(tag) - if tag: - return 
meta(tag, dirty=dirty, branch=branch, config=config) - - try: - tag = self.get_latest_normalizable_tag() - dist = self.get_distance_revs(tag) - if tag == "null": - tag = "0.0" - dist = int(dist) + 1 - - if self.check_changes_since_tag(tag) or dirty: - return meta( - tag, - distance=dist, - node=node, - dirty=dirty, - branch=branch, - config=config, - node_date=date, - ) - else: - return meta(tag, config=config) - - except ValueError: - pass # unpacking failed, old hg - - def hg_log(self, revset, template): - cmd = ["hg", "log", "-r", revset, "-T", template] - return self.do(cmd) - - def get_latest_normalizable_tag(self): - # Gets all tags containing a '.' (see #229) from oldest to newest - outlines = self.hg_log( - revset="ancestors(.) and tag('re:\\.')", - template="{tags}{if(tags, '\n', '')}", - ).split() - if not outlines: - return "null" - tag = outlines[-1].split()[-1] - return tag - - def get_distance_revs(self, rev1, rev2="."): - revset = f"({rev1}::{rev2})" - out = self.hg_log(revset, ".") - return len(out) - 1 - - def check_changes_since_tag(self, tag): - - if tag == "0.0": - return True - - revset = ( - "(branch(.)" # look for revisions in this branch only - f" and tag({tag!r})::." # after the last tag - # ignore commits that only modify .hgtags and nothing else: - " and (merge() or file('re:^(?!\\.hgtags).*$'))" - f" and not tag({tag!r}))" # ignore the tagged commit itself - ) - - return bool(self.hg_log(revset, ".")) - - -def parse(root, config=None): - if not config: - config = Configuration(root=root) - - if os.path.exists(os.path.join(root, ".hg/git")): - paths, _, ret = do_ex("hg path", root) - if not ret: - for line in paths.split("\n"): - if line.startswith("default ="): - path = Path(line.split()[2]) - if path.name.endswith(".git") or (path / ".git").exists(): - from .git import _git_parse_inner - from .hg_git import GitWorkdirHgClient - - wd = GitWorkdirHgClient.from_potential_worktree(root) - if wd: - return _git_parse_inner(config, wd) - - wd = HgWorkdir.from_potential_worktree(config.absolute_root) - - if wd is None: - return - - return wd.get_meta(config) - - -def archival_to_version(data, config: "Configuration | None" = None): - trace("data", data) - node = data.get("node", "")[:12] - if node: - node = "h" + node - if "tag" in data: - return meta(data["tag"], config=config) - elif "latesttag" in data: - return meta( - data["latesttag"], - distance=data["latesttagdistance"], - node=node, - config=config, - ) - else: - return meta("0.0", node=node, config=config) - - -def parse_archival(root, config=None): - archival = os.path.join(root, ".hg_archival.txt") - data = data_from_mime(archival) - return archival_to_version(data, config=config) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py deleted file mode 100644 index b871a3933..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/hg_git.py +++ /dev/null @@ -1,133 +0,0 @@ -import os -from datetime import datetime - -from .git import GitWorkdir -from .hg import HgWorkdir -from .utils import do_ex -from .utils import require_command -from .utils import trace - - -class GitWorkdirHgClient(GitWorkdir, HgWorkdir): - COMMAND = "hg" - - @classmethod - def from_potential_worktree(cls, wd): - require_command(cls.COMMAND) - root, err, ret = do_ex("hg root", wd) - if ret: - return - return cls(root) - - def is_dirty(self): - out, _, _ = self.do_ex("hg id -T '{dirty}'") - return bool(out) - - def get_branch(self): - branch, 
err, ret = self.do_ex("hg id -T {bookmarks}") - if ret: - trace("branch err", branch, err, ret) - return - return branch - - def get_head_date(self): - date_part, err, ret = self.do_ex("hg log -r . -T {shortdate(date)}") - if ret: - trace("head date err", date_part, err, ret) - return - return datetime.strptime(date_part, r"%Y-%m-%d").date() - - def is_shallow(self): - return False - - def fetch_shallow(self): - pass - - def get_hg_node(self): - node, _, ret = self.do_ex("hg log -r . -T {node}") - if not ret: - return node - - def _hg2git(self, hg_node): - git_node = None - with open(os.path.join(self.path, ".hg/git-mapfile")) as file: - for line in file: - if hg_node in line: - git_node, hg_node = line.split() - break - return git_node - - def node(self): - hg_node = self.get_hg_node() - if hg_node is None: - return - - git_node = self._hg2git(hg_node) - - if git_node is None: - # trying again after hg -> git - self.do_ex("hg gexport") - git_node = self._hg2git(hg_node) - - if git_node is None: - trace("Cannot get git node so we use hg node", hg_node) - - if hg_node == "0" * len(hg_node): - # mimick Git behavior - return None - - return hg_node - - return git_node[:7] - - def count_all_nodes(self): - revs, _, _ = self.do_ex("hg log -r 'ancestors(.)' -T '.'") - return len(revs) - - def default_describe(self): - """ - Tentative to reproduce the output of - - `git describe --dirty --tags --long --match *[0-9]*` - - """ - hg_tags, _, ret = self.do_ex( - [ - "hg", - "log", - "-r", - "(reverse(ancestors(.)) and tag(r're:[0-9]'))", - "-T", - "{tags}{if(tags, ' ', '')}", - ] - ) - if ret: - return None, None, None - hg_tags = hg_tags.split() - - if not hg_tags: - return None, None, None - - git_tags = {} - with open(os.path.join(self.path, ".hg/git-tags")) as file: - for line in file: - node, tag = line.split() - git_tags[tag] = node - - # find the first hg tag which is also a git tag - for tag in hg_tags: - if tag in git_tags: - break - - out, _, ret = self.do_ex(["hg", "log", "-r", f"'{tag}'::.", "-T", "."]) - if ret: - return None, None, None - distance = len(out) - 1 - - node = self.node() - desc = f"{tag}-{distance}-g{node}" - - if self.is_dirty(): - desc += "-dirty" - - return desc, None, 0 diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py deleted file mode 100644 index ad69a3ffa..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/integration.py +++ /dev/null @@ -1,94 +0,0 @@ -import os -import warnings - -import setuptools - -from . import _get_version -from .config import _read_dist_name_from_setup_cfg -from .config import Configuration -from .utils import do -from .utils import iter_entry_points -from .utils import trace - - -def _warn_on_old_setuptools(_version=setuptools.__version__): - if int(_version.split(".")[0]) < 45: - warnings.warn( - RuntimeWarning( - f""" -ERROR: setuptools=={_version} is used in combination with setuptools_scm>=6.x - -Your build configuration is incomplete and previously worked by accident! - - -This happens as setuptools is unable to replace itself when a activated build dependency -requires a more recent setuptools version -(it does not respect "setuptools>X" in setup_requires). 
- - -setuptools>=31 is required for setup.cfg metadata support -setuptools>=42 is required for pyproject.toml configuration support - -Suggested workarounds if applicable: - - preinstalling build dependencies like setuptools_scm before running setup.py - - installing setuptools_scm using the system package manager to ensure consistency - - migrating from the deprecated setup_requires mechanism to pep517/518 - and using a pyproject.toml to declare build dependencies - which are reliably pre-installed before running the build tools -""" - ) - ) - - -_warn_on_old_setuptools() - - -def version_keyword(dist: setuptools.Distribution, keyword, value): - if not value: - return - if value is True: - value = {} - if getattr(value, "__call__", None): - value = value() - assert ( - "dist_name" not in value - ), "dist_name may not be specified in the setup keyword " - - trace( - "version keyword", - vars(dist.metadata), - ) - dist_name = dist.metadata.name # type: str | None - if dist_name is None: - dist_name = _read_dist_name_from_setup_cfg() - config = Configuration(dist_name=dist_name, **value) - dist.metadata.version = _get_version(config) - - -def find_files(path=""): - for ep in iter_entry_points("setuptools_scm.files_command"): - command = ep.load() - if isinstance(command, str): - # this technique is deprecated - res = do(ep.load(), path or ".").splitlines() - else: - res = command(path) - if res: - return res - return [] - - -def infer_version(dist: setuptools.Distribution): - trace( - "finalize hook", - vars(dist.metadata), - ) - dist_name = dist.metadata.name - if not os.path.isfile("pyproject.toml"): - return - try: - config = Configuration.from_file(dist_name=dist_name) - except LookupError as e: - trace(e) - else: - dist.metadata.version = _get_version(config) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py deleted file mode 100644 index 142065f59..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/scm_workdir.py +++ /dev/null @@ -1,15 +0,0 @@ -from .utils import do -from .utils import do_ex -from .utils import require_command - - -class Workdir: - def __init__(self, path): - require_command(self.COMMAND) - self.path = path - - def do_ex(self, cmd): - return do_ex(cmd, cwd=self.path) - - def do(self, cmd): - return do(cmd, cwd=self.path) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py deleted file mode 100644 index 2e84f870a..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/utils.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -utils -""" -import inspect -import os -import platform -import shlex -import subprocess -import sys -import warnings -from typing import Optional - -DEBUG = bool(os.environ.get("SETUPTOOLS_SCM_DEBUG")) -IS_WINDOWS = platform.system() == "Windows" - - -def no_git_env(env): - # adapted from pre-commit - # Too many bugs dealing with environment variables and GIT: - # https://github.com/pre-commit/pre-commit/issues/300 - # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running - # pre-commit hooks - # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE - # while running pre-commit hooks in submodules. - # GIT_DIR: Causes git clone to clone wrong thing - # GIT_INDEX_FILE: Causes 'error invalid object ...' 
during commit - for k, v in env.items(): - if k.startswith("GIT_"): - trace(k, v) - return { - k: v - for k, v in env.items() - if not k.startswith("GIT_") - or k in ("GIT_EXEC_PATH", "GIT_SSH", "GIT_SSH_COMMAND") - } - - -def trace(*k) -> None: - if DEBUG: - print(*k, file=sys.stderr, flush=True) - - -def ensure_stripped_str(str_or_bytes): - if isinstance(str_or_bytes, str): - return str_or_bytes.strip() - else: - return str_or_bytes.decode("utf-8", "surrogateescape").strip() - - -def _always_strings(env_dict): - """ - On Windows and Python 2, environment dictionaries must be strings - and not unicode. - """ - if IS_WINDOWS: - env_dict.update((key, str(value)) for (key, value) in env_dict.items()) - return env_dict - - -def _popen_pipes(cmd, cwd): - return subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - cwd=str(cwd), - env=_always_strings( - dict( - no_git_env(os.environ), - # os.environ, - # try to disable i18n - LC_ALL="C", - LANGUAGE="", - HGPLAIN="1", - ) - ), - ) - - -def do_ex(cmd, cwd="."): - trace("cmd", repr(cmd)) - trace(" in", cwd) - if os.name == "posix" and not isinstance(cmd, (list, tuple)): - cmd = shlex.split(cmd) - - p = _popen_pipes(cmd, cwd) - out, err = p.communicate() - if out: - trace("out", repr(out)) - if err: - trace("err", repr(err)) - if p.returncode: - trace("ret", p.returncode) - return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode - - -def do(cmd, cwd="."): - out, err, ret = do_ex(cmd, cwd) - if ret: - print(err) - return out - - -def data_from_mime(path): - with open(path, encoding="utf-8") as fp: - content = fp.read() - trace("content", repr(content)) - # the complex conditions come from reading pseudo-mime-messages - data = dict(x.split(": ", 1) for x in content.splitlines() if ": " in x) - trace("data", data) - return data - - -def function_has_arg(fn, argname): - assert inspect.isfunction(fn) - - argspec = inspect.signature(fn).parameters - - return argname in argspec - - -def has_command(name, warn=True): - try: - p = _popen_pipes([name, "help"], ".") - except OSError: - trace(*sys.exc_info()) - res = False - else: - p.communicate() - res = not p.returncode - if not res and warn: - warnings.warn("%r was not found" % name, category=RuntimeWarning) - return res - - -def require_command(name): - if not has_command(name, warn=False): - raise OSError("%r was not found" % name) - - -try: - from importlib.metadata import entry_points # type: ignore -except ImportError: - from pkg_resources import iter_entry_points -else: - - def iter_entry_points(group: str, name: Optional[str] = None): - all_eps = entry_points() - if hasattr(all_eps, "select"): - eps = all_eps.select(group=group) - else: - eps = all_eps[group] - if name is None: - return iter(eps) - return (ep for ep in eps if ep.name == name) diff --git a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py b/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py deleted file mode 100644 index 91e25f6a3..000000000 --- a/.eggs/setuptools_scm-6.3.2-py3.8.egg/setuptools_scm/version.py +++ /dev/null @@ -1,460 +0,0 @@ -import datetime -import os -import re -import time -import warnings - -from .config import Configuration -from .config import Version as PkgVersion -from .utils import iter_entry_points -from .utils import trace - - -SEMVER_MINOR = 2 -SEMVER_PATCH = 3 -SEMVER_LEN = 3 - - -def _parse_version_tag(tag, config): - tagstring = tag if isinstance(tag, str) else str(tag) - match = config.tag_regex.match(tagstring) - - result = None - if 
match: - if len(match.groups()) == 1: - key = 1 - else: - key = "version" - - result = { - "version": match.group(key), - "prefix": match.group(0)[: match.start(key)], - "suffix": match.group(0)[match.end(key) :], - } - - trace(f"tag '{tag}' parsed to {result}") - return result - - -def callable_or_entrypoint(group, callable_or_name): - trace("ep", (group, callable_or_name)) - - if callable(callable_or_name): - return callable_or_name - - for ep in iter_entry_points(group, callable_or_name): - trace("ep found:", ep.name) - return ep.load() - - -def tag_to_version(tag, config: "Configuration | None" = None): - """ - take a tag that might be prefixed with a keyword and return only the version part - :param config: optional configuration object - """ - trace("tag", tag) - - if not config: - config = Configuration() - - tagdict = _parse_version_tag(tag, config) - if not isinstance(tagdict, dict) or not tagdict.get("version", None): - warnings.warn(f"tag {tag!r} no version found") - return None - - version = tagdict["version"] - trace("version pre parse", version) - - if tagdict.get("suffix", ""): - warnings.warn( - "tag {!r} will be stripped of its suffix '{}'".format( - tag, tagdict["suffix"] - ) - ) - - version = config.version_cls(version) - trace("version", repr(version)) - - return version - - -def tags_to_versions(tags, config=None): - """ - take tags that might be prefixed with a keyword and return only the version part - :param tags: an iterable of tags - :param config: optional configuration object - """ - result = [] - for tag in tags: - tag = tag_to_version(tag, config=config) - if tag: - result.append(tag) - return result - - -class ScmVersion: - def __init__( - self, - tag_version, - distance=None, - node=None, - dirty=False, - preformatted=False, - branch=None, - config=None, - node_date=None, - **kw, - ): - if kw: - trace("unknown args", kw) - self.tag = tag_version - if dirty and distance is None: - distance = 0 - self.distance = distance - self.node = node - self.node_date = node_date - self.time = datetime.datetime.utcfromtimestamp( - int(os.environ.get("SOURCE_DATE_EPOCH", time.time())) - ) - self._extra = kw - self.dirty = dirty - self.preformatted = preformatted - self.branch = branch - self.config = config - - @property - def extra(self): - warnings.warn( - "ScmVersion.extra is deprecated and will be removed in future", - category=DeprecationWarning, - stacklevel=2, - ) - return self._extra - - @property - def exact(self): - return self.distance is None - - def __repr__(self): - return self.format_with( - "<ScmVersion {tag} dist={distance} node={node} dirty={dirty} branch={branch}>" - ) - - def format_with(self, fmt, **kw): - return fmt.format( - time=self.time, - tag=self.tag, - distance=self.distance, - node=self.node, - dirty=self.dirty, - branch=self.branch, - node_date=self.node_date, - **kw, - ) - - def format_choice(self, clean_format, dirty_format, **kw): - return self.format_with(dirty_format if self.dirty else clean_format, **kw) - - def format_next_version(self, guess_next, fmt="{guessed}.dev{distance}", **kw): - guessed = guess_next(self.tag, **kw) - return self.format_with(fmt, guessed=guessed) - - -def _parse_tag(tag, preformatted, config: "Configuration|None"): - if preformatted: - return tag - if config is None or not isinstance(tag, config.version_cls): - tag = tag_to_version(tag, config) - return tag - - -def meta( - tag, - distance: "int|None" = None, - dirty: bool = False, - node: "str|None" = None, - preformatted: bool = False, - branch: "str|None" = None, - config: "Configuration|None" = None, - **kw, -): - if not config: -
warnings.warn( - "meta invoked without explicit configuration," - " will use defaults where required." - ) - parsed_version = _parse_tag(tag, preformatted, config) - trace("version", tag, "->", parsed_version) - assert parsed_version is not None, "Can't parse version %s" % tag - return ScmVersion( - parsed_version, distance, node, dirty, preformatted, branch, config, **kw - ) - - -def guess_next_version(tag_version: ScmVersion): - version = _strip_local(str(tag_version)) - return _bump_dev(version) or _bump_regex(version) - - -def _strip_local(version_string): - public, sep, local = version_string.partition("+") - return public - - -def _bump_dev(version): - if ".dev" not in version: - return - - prefix, tail = version.rsplit(".dev", 1) - if tail != "0": - raise ValueError( - "choosing custom numbers for the `.devX` distance " - "is not supported.\n " - "The {version} can't be bumped\n" - "Please drop the tag or create a new supported one".format(version=version) - ) - return prefix - - -def _bump_regex(version): - match = re.match(r"(.*?)(\d+)$", version) - if match is None: - raise ValueError( - "{version} does not end with a number to bump, " - "please correct or use a custom version scheme".format(version=version) - ) - else: - prefix, tail = match.groups() - return "%s%d" % (prefix, int(tail) + 1) - - -def guess_next_dev_version(version): - if version.exact: - return version.format_with("{tag}") - else: - return version.format_next_version(guess_next_version) - - -def guess_next_simple_semver(version, retain, increment=True): - try: - parts = [int(i) for i in str(version).split(".")[:retain]] - except ValueError: - raise ValueError(f"{version} can't be parsed as numeric version") - while len(parts) < retain: - parts.append(0) - if increment: - parts[-1] += 1 - while len(parts) < SEMVER_LEN: - parts.append(0) - return ".".join(str(i) for i in parts) - - -def simplified_semver_version(version): - if version.exact: - return guess_next_simple_semver(version.tag, retain=SEMVER_LEN, increment=False) - else: - if version.branch is not None and "feature" in version.branch: - return version.format_next_version( - guess_next_simple_semver, retain=SEMVER_MINOR - ) - else: - return version.format_next_version( - guess_next_simple_semver, retain=SEMVER_PATCH - ) - - -def release_branch_semver_version(version): - if version.exact: - return version.format_with("{tag}") - if version.branch is not None: - # Does the branch name (stripped of namespace) parse as a version? - branch_ver = _parse_version_tag(version.branch.split("/")[-1], version.config) - if branch_ver is not None: - branch_ver = branch_ver["version"] - if branch_ver[0] == "v": - # Allow branches that start with 'v', similar to Version. - branch_ver = branch_ver[1:] - # Does the branch version up to the minor part match the tag? If not it - # might be like, an issue number or something and not a version number, so - # we only want to use it if it matches. - tag_ver_up_to_minor = str(version.tag).split(".")[:SEMVER_MINOR] - branch_ver_up_to_minor = branch_ver.split(".")[:SEMVER_MINOR] - if branch_ver_up_to_minor == tag_ver_up_to_minor: - # We're in a release/maintenance branch, next is a patch/rc/beta bump: - return version.format_next_version(guess_next_version) - # We're in a development branch, next is a minor bump: - return version.format_next_version(guess_next_simple_semver, retain=SEMVER_MINOR) - - -def release_branch_semver(version): - warnings.warn( - "release_branch_semver is deprecated and will be removed in future. 
" - + "Use release_branch_semver_version instead", - category=DeprecationWarning, - stacklevel=2, - ) - return release_branch_semver_version(version) - - -def no_guess_dev_version(version): - if version.exact: - return version.format_with("{tag}") - else: - return version.format_with("{tag}.post1.dev{distance}") - - -def date_ver_match(ver): - match = re.match( - ( - r"^(?P<date>(?P<year>\d{2}|\d{4})(?:\.\d{1,2}){2})" - r"(?:\.(?P<patch>\d*)){0,1}?$" - ), - str(ver), - ) - return match - - -def guess_next_date_ver(version, node_date=None, date_fmt=None, version_cls=None): - """ - same-day -> patch +1 - other-day -> today - - distance is always added as .devX - """ - match = date_ver_match(version) - if match is None: - warnings.warn( - f"{version} does not correspond to a valid versioning date, " - "assuming legacy version" - ) - if date_fmt is None: - date_fmt = "%y.%m.%d" - - # deduct date format if not provided - if date_fmt is None: - date_fmt = "%Y.%m.%d" if len(match.group("year")) == 4 else "%y.%m.%d" - head_date = node_date or datetime.date.today() - # compute patch - if match is None: - tag_date = datetime.date.today() - else: - tag_date = datetime.datetime.strptime(match.group("date"), date_fmt).date() - if tag_date == head_date: - patch = "0" if match is None else (match.group("patch") or "0") - patch = int(patch) + 1 - else: - if tag_date > head_date and match is not None: - # warn on future times - warnings.warn( - "your previous tag ({}) is ahead your node date ({})".format( - tag_date, head_date - ) - ) - patch = 0 - next_version = "{node_date:{date_fmt}}.{patch}".format( - node_date=head_date, date_fmt=date_fmt, patch=patch - ) - # rely on the Version object to ensure consistency (e.g. remove leading 0s) - if version_cls is None: - version_cls = PkgVersion - next_version = str(version_cls(next_version)) - return next_version - - -def calver_by_date(version): - if version.exact and not version.dirty: - return version.format_with("{tag}") - # TODO: move the release-X check to a new scheme - if version.branch is not None and version.branch.startswith("release-"): - branch_ver = _parse_version_tag(version.branch.split("-")[-1], version.config) - if branch_ver is not None: - ver = branch_ver["version"] - match = date_ver_match(ver) - if match: - return ver - return version.format_next_version( - guess_next_date_ver, - node_date=version.node_date, - version_cls=version.config.version_cls, - ) - - -def _format_local_with_time(version, time_format): - - if version.exact or version.node is None: - return version.format_choice( - "", "+d{time:{time_format}}", time_format=time_format - ) - else: - return version.format_choice( - "+{node}", "+{node}.d{time:{time_format}}", time_format=time_format - ) - - -def get_local_node_and_date(version): - return _format_local_with_time(version, time_format="%Y%m%d") - - -def get_local_node_and_timestamp(version, fmt="%Y%m%d%H%M%S"): - return _format_local_with_time(version, time_format=fmt) - - -def get_local_dirty_tag(version): - return version.format_choice("", "+dirty") - - -def get_no_local_node(_): - return "" - - -def postrelease_version(version): - if version.exact: - return version.format_with("{tag}") - else: - return version.format_with("{tag}.post{distance}") - - -def _get_ep(group, name): - for ep in iter_entry_points(group, name): - trace("ep found:", ep.name) - return ep.load() - - -def _iter_version_schemes(entrypoint, scheme_value, _memo=None): - if _memo is None: - _memo = set() - if isinstance(scheme_value, str): - scheme_value =
_get_ep(entrypoint, scheme_value) - - if isinstance(scheme_value, (list, tuple)): - for variant in scheme_value: - if variant not in _memo: - _memo.add(variant) - yield from _iter_version_schemes(entrypoint, variant, _memo=_memo) - elif callable(scheme_value): - yield scheme_value - - -def _call_version_scheme(version, entypoint, given_value, default): - for scheme in _iter_version_schemes(entypoint, given_value): - result = scheme(version) - if result is not None: - return result - return default - - -def format_version(version, **config): - trace("scm version", version) - trace("config", config) - if version.preformatted: - return version.tag - main_version = _call_version_scheme( - version, "setuptools_scm.version_scheme", config["version_scheme"], None - ) - trace("version", main_version) - assert main_version is not None - local_version = _call_version_scheme( - version, "setuptools_scm.local_scheme", config["local_scheme"], "+unknown" - ) - trace("local_version", local_version) - return main_version + local_version diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE deleted file mode 100644 index 89de35479..000000000 --- a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO deleted file mode 100644 index 78c9f981d..000000000 --- a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/PKG-INFO +++ /dev/null @@ -1,44 +0,0 @@ -Metadata-Version: 2.1 -Name: setuptools-scm-git-archive -Version: 1.1 -Summary: setuptools_scm plugin for git archives -Home-page: https://github.com/Changaco/setuptools_scm_git_archive/ -Author: Changaco -Author-email: changaco@changaco.oy.lc -License: MIT -Keywords: scm vcs version tags git archive -Platform: UNKNOWN - -This is a `setuptools_scm <https://github.com/pypa/setuptools_scm/>`_ plugin -that adds support for git archives (for example the ones GitHub automatically -generates). - -Note that it only works for archives of tagged commits (because git currently -lacks a format option equivalent to ``git describe --tags``). - -Usage ------ - -Add ``'setuptools_scm_git_archive'`` to the ``setup_requires`` parameter in your -project's ``setup.py`` file: - -..
code:: python - - setup( - ..., - use_scm_version=True, - setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'], - ..., - ) - -Create a ``.git_archival.txt`` file with the following content:: - - ref-names: $Format:%D$ - -Then add this line to the ``.gitattributes`` file:: - - .git_archival.txt export-subst - -Finally, don't forget to commit these two files. - - diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD deleted file mode 100644 index fc3cf20eb..000000000 --- a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/RECORD +++ /dev/null @@ -1,7 +0,0 @@ -setuptools_scm_git_archive/__init__.py,sha256=Ds2ZcVtE-4R1sw5Y0Pps3xrRTvl_La4VTjy25hOje5Y,518 -setuptools_scm_git_archive-1.1.dist-info/LICENSE,sha256=iYB6zyMJvShfAzQE7nhYFgLzzZuBmhasLw5fYP9KRz4,1023 -setuptools_scm_git_archive-1.1.dist-info/METADATA,sha256=oM3mrg7VgUdhFGJiSv1AyiOVEh_MOzNlX8Atg8lji10,1150 -setuptools_scm_git_archive-1.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110 -setuptools_scm_git_archive-1.1.dist-info/entry_points.txt,sha256=pnu7pquTqm1K1FYh6SbnBw8m3VGKrST-SORk5h2Vqqw,171 -setuptools_scm_git_archive-1.1.dist-info/top_level.txt,sha256=7hn9ByUzlXSHy5nZTcHi8oq763_bGWG4Zg-902SA01A,27 -setuptools_scm_git_archive-1.1.dist-info/RECORD,, diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL deleted file mode 100644 index c8240f03e..000000000 --- a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt deleted file mode 100644 index 5a714ca9d..000000000 --- a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/entry_points.txt +++ /dev/null @@ -1,6 +0,0 @@ -[setuptools_scm.parse_scm] -.git_archival.txt = setuptools_scm_git_archive:parse - -[setuptools_scm.parse_scm_fallback] -.git_archival.txt = setuptools_scm_git_archive:parse - diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt deleted file mode 100644 index 066fe456c..000000000 --- a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/EGG-INFO/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -setuptools_scm_git_archive diff --git a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py b/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py deleted file mode 100644 index 1e8275817..000000000 --- a/.eggs/setuptools_scm_git_archive-1.1-py3.8.egg/setuptools_scm_git_archive/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from os.path import join -import re - -from setuptools_scm.utils import data_from_mime, trace -from setuptools_scm.version import meta, tags_to_versions - - -tag_re = re.compile(r'(?<=\btag: )([^,]+)\b') - - -def archival_to_version(data): - trace('data', data) - versions = tags_to_versions(tag_re.findall(data.get('ref-names', ''))) - if versions: - return meta(versions[0]) - - -def parse(root): - archival = join(root, '.git_archival.txt') - data = data_from_mime(archival) - return archival_to_version(data) diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE 
b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE deleted file mode 100644 index e859590f8..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Taneli Hukkinen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO deleted file mode 100644 index ad224bfb4..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/PKG-INFO +++ /dev/null @@ -1,208 +0,0 @@ -Metadata-Version: 2.1 -Name: tomli -Version: 1.2.2 -Summary: A lil' TOML parser -Keywords: toml -Author-email: Taneli Hukkinen -Requires-Python: >=3.6 -Description-Content-Type: text/markdown -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: MacOS -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: POSIX :: Linux -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Typing :: Typed -Project-URL: Changelog, https://github.com/hukkin/tomli/blob/master/CHANGELOG.md -Project-URL: Homepage, https://github.com/hukkin/tomli - -[![Build Status](https://github.com/hukkin/tomli/workflows/Tests/badge.svg?branch=master)](https://github.com/hukkin/tomli/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush) -[![codecov.io](https://codecov.io/gh/hukkin/tomli/branch/master/graph/badge.svg)](https://codecov.io/gh/hukkin/tomli) -[![PyPI version](https://img.shields.io/pypi/v/tomli)](https://pypi.org/project/tomli) - -# Tomli - -> A lil' TOML parser - -**Table of Contents** *generated with [mdformat-toc](https://github.com/hukkin/mdformat-toc)* - - - -- [Intro](#intro) -- [Installation](#installation) -- [Usage](#usage) - - [Parse a TOML string](#parse-a-toml-string) - - [Parse a TOML file](#parse-a-toml-file) - - [Handle invalid TOML](#handle-invalid-toml) - - [Construct `decimal.Decimal`s from TOML floats](#construct-decimaldecimals-from-toml-floats) -- [FAQ](#faq) - - [Why this parser?](#why-this-parser) - - [Is comment preserving round-trip 
parsing supported?](#is-comment-preserving-round-trip-parsing-supported) - - [Is there a `dumps`, `write` or `encode` function?](#is-there-a-dumps-write-or-encode-function) - - [How do TOML types map into Python types?](#how-do-toml-types-map-into-python-types) -- [Performance](#performance) - - - -## Intro - -Tomli is a Python library for parsing [TOML](https://toml.io). -Tomli is fully compatible with [TOML v1.0.0](https://toml.io/en/v1.0.0). - -## Installation - -```bash -pip install tomli -``` - -## Usage - -### Parse a TOML string - -```python -import tomli - -toml_str = """ - gretzky = 99 - - [kurri] - jari = 17 - """ - -toml_dict = tomli.loads(toml_str) -assert toml_dict == {"gretzky": 99, "kurri": {"jari": 17}} -``` - -### Parse a TOML file - -```python -import tomli - -with open("path_to_file/conf.toml", "rb") as f: - toml_dict = tomli.load(f) -``` - -The file must be opened in binary mode (with the `"rb"` flag). -Binary mode will enforce decoding the file as UTF-8 with universal newlines disabled, -both of which are required to correctly parse TOML. -Support for text file objects is deprecated for removal in the next major release. - -### Handle invalid TOML - -```python -import tomli - -try: - toml_dict = tomli.loads("]] this is invalid TOML [[") -except tomli.TOMLDecodeError: - print("Yep, definitely not valid.") -``` - -Note that while the `TOMLDecodeError` type is public API, error messages of raised instances of it are not. -Error messages should not be assumed to stay constant across Tomli versions. - -### Construct `decimal.Decimal`s from TOML floats - -```python -from decimal import Decimal -import tomli - -toml_dict = tomli.loads("precision-matters = 0.982492", parse_float=Decimal) -assert toml_dict["precision-matters"] == Decimal("0.982492") -``` - -Note that `decimal.Decimal` can be replaced with another callable that converts a TOML float from string to a Python type. -The `decimal.Decimal` is, however, a practical choice for use cases where float inaccuracies can not be tolerated. - -Illegal types include `dict`, `list`, and anything that has the `append` attribute. -Parsing floats into an illegal type results in undefined behavior. - -## FAQ - -### Why this parser? - -- it's lil' -- pure Python with zero dependencies -- the fastest pure Python parser [\*](#performance): - 15x as fast as [tomlkit](https://pypi.org/project/tomlkit/), - 2.4x as fast as [toml](https://pypi.org/project/toml/) -- outputs [basic data types](#how-do-toml-types-map-into-python-types) only -- 100% spec compliant: passes all tests in - [a test set](https://github.com/toml-lang/compliance/pull/8) - soon to be merged to the official - [compliance tests for TOML](https://github.com/toml-lang/compliance) - repository -- thoroughly tested: 100% branch coverage - -### Is comment preserving round-trip parsing supported? - -No. - -The `tomli.loads` function returns a plain `dict` that is populated with builtin types and types from the standard library only. -Preserving comments requires a custom type to be returned so will not be supported, -at least not by the `tomli.loads` and `tomli.load` functions. - -Look into [TOML Kit](https://github.com/sdispater/tomlkit) if preservation of style is what you need. - -### Is there a `dumps`, `write` or `encode` function? - -[Tomli-W](https://github.com/hukkin/tomli-w) is the write-only counterpart of Tomli, providing `dump` and `dumps` functions. 
- -The core library does not include write capability, as most TOML use cases are read-only, and Tomli intends to be minimal. - -### How do TOML types map into Python types? - -| TOML type | Python type | Details | -| ---------------- | ------------------- | ------------------------------------------------------------ | -| Document Root | `dict` | | -| Key | `str` | | -| String | `str` | | -| Integer | `int` | | -| Float | `float` | | -| Boolean | `bool` | | -| Offset Date-Time | `datetime.datetime` | `tzinfo` attribute set to an instance of `datetime.timezone` | -| Local Date-Time | `datetime.datetime` | `tzinfo` attribute set to `None` | -| Local Date | `datetime.date` | | -| Local Time | `datetime.time` | | -| Array | `list` | | -| Table | `dict` | | -| Inline Table | `dict` | | - -## Performance - -The `benchmark/` folder in this repository contains a performance benchmark for comparing the various Python TOML parsers. -The benchmark can be run with `tox -e benchmark-pypi`. -Running the benchmark on my personal computer output the following: - -```console -foo@bar:~/dev/tomli$ tox -e benchmark-pypi -benchmark-pypi installed: attrs==19.3.0,click==7.1.2,pytomlpp==1.0.2,qtoml==0.3.0,rtoml==0.7.0,toml==0.10.2,tomli==1.1.0,tomlkit==0.7.2 -benchmark-pypi run-test-pre: PYTHONHASHSEED='2658546909' -benchmark-pypi run-test: commands[0] | python -c 'import datetime; print(datetime.date.today())' -2021-07-23 -benchmark-pypi run-test: commands[1] | python --version -Python 3.8.10 -benchmark-pypi run-test: commands[2] | python benchmark/run.py -Parsing data.toml 5000 times: ------------------------------------------------------- - parser | exec time | performance (more is better) ------------+------------+----------------------------- - rtoml | 0.901 s | baseline (100%) - pytomlpp | 1.08 s | 83.15% - tomli | 3.89 s | 23.15% - toml | 9.36 s | 9.63% - qtoml | 11.5 s | 7.82% - tomlkit | 56.8 s | 1.59% -``` - -The parsers are ordered from fastest to slowest, using the fastest parser as baseline. -Tomli performed the best out of all pure Python TOML parsers, -losing only to pytomlpp (wraps C++) and rtoml (wraps Rust). 
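The date-time rows of the mapping table above are the easiest to get wrong. A minimal sketch of the distinction, assuming tomli 1.2.x is installed (the `doc` string here is illustrative only):

```python
import datetime

import tomli

# Offset date-times carry a timezone; local date-times deliberately do not.
doc = 'launch = 1979-05-27T07:32:00Z\nmeeting = 1979-05-27T07:32:00'
result = tomli.loads(doc)

assert result["launch"].tzinfo == datetime.timezone.utc  # Offset Date-Time
assert result["meeting"].tzinfo is None                  # Local Date-Time
```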
- diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD deleted file mode 100644 index 880ac4ed3..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -tomli/__init__.py,sha256=kbhPFVUJrQxajcxAWEbYzDYEjjtRJ6dGT74U4XTOkhI,299 -tomli/_parser.py,sha256=HYJuOBq1QBZm0O6PMeLJPULdYVwsdYcdZUSuABujXTM,21659 -tomli/_re.py,sha256=bw4_EVo4n1qZwcEza7akJQ_wM6hLDJFn1Zsuf9YSjs8,2817 -tomli/_types.py,sha256=b1mavYLUYLBz0EP2lDrMVM6EGVFeqvxiqkS03jXNBvs,126 -tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 -tomli-1.2.2.dist-info/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -tomli-1.2.2.dist-info/WHEEL,sha256=pVNS5wRGlMB8qzi0M1coslDk7i694hS7VxZqRXRntY4,81 -tomli-1.2.2.dist-info/METADATA,sha256=bhJIzo0PW08BpJ2wMFAGN19RxM8pU1eO5FMtjhAojRc,9089 -tomli-1.2.2.dist-info/RECORD,, diff --git a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL b/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL deleted file mode 100644 index 3c6a1028c..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/EGG-INFO/WHEEL +++ /dev/null @@ -1,4 +0,0 @@ -Wheel-Version: 1.0 -Generator: flit 3.4.0 -Root-Is-Purelib: true -Tag: py3-none-any diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py deleted file mode 100644 index 7bcdbab36..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/tomli/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""A lil' TOML parser.""" - -__all__ = ("loads", "load", "TOMLDecodeError") -__version__ = "1.2.2" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT - -from tomli._parser import TOMLDecodeError, load, loads - -# Pretend this exception was created here. -TOMLDecodeError.__module__ = "tomli" diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py deleted file mode 100644 index 89e81c3b3..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_parser.py +++ /dev/null @@ -1,663 +0,0 @@ -import string -from types import MappingProxyType -from typing import Any, BinaryIO, Dict, FrozenSet, Iterable, NamedTuple, Optional, Tuple -import warnings - -from tomli._re import ( - RE_DATETIME, - RE_LOCALTIME, - RE_NUMBER, - match_to_datetime, - match_to_localtime, - match_to_number, -) -from tomli._types import Key, ParseFloat, Pos - -ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) - -# Neither of these sets include quotation mark or backslash. They are -# currently handled as separate cases in the parser functions. 
-ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") -ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") - -ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS -ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS - -ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS - -TOML_WS = frozenset(" \t") -TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") -BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") -KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") -HEXDIGIT_CHARS = frozenset(string.hexdigits) - -BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( - { - "\\b": "\u0008", # backspace - "\\t": "\u0009", # tab - "\\n": "\u000A", # linefeed - "\\f": "\u000C", # form feed - "\\r": "\u000D", # carriage return - '\\"': "\u0022", # quote - "\\\\": "\u005C", # backslash - } -) - - -class TOMLDecodeError(ValueError): - """An error raised if a document is not valid TOML.""" - - -def load(fp: BinaryIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]: - """Parse TOML from a binary file object.""" - s_bytes = fp.read() - try: - s = s_bytes.decode() - except AttributeError: - warnings.warn( - "Text file object support is deprecated in favor of binary file objects." - ' Use `open("foo.toml", "rb")` to open the file in binary mode.', - DeprecationWarning, - stacklevel=2, - ) - s = s_bytes # type: ignore[assignment] - return loads(s, parse_float=parse_float) - - -def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa: C901 - """Parse TOML from a string.""" - - # The spec allows converting "\r\n" to "\n", even in string - # literals. Let's do so to simplify parsing. - src = s.replace("\r\n", "\n") - pos = 0 - out = Output(NestedDict(), Flags()) - header: Key = () - - # Parse one statement at a time - # (typically means one line in TOML source) - while True: - # 1. Skip line leading whitespace - pos = skip_chars(src, pos, TOML_WS) - - # 2. Parse rules. Expect one of the following: - # - end of file - # - end of line - # - comment - # - key/value pair - # - append dict to list (and move to its namespace) - # - create dict (and move to its namespace) - # Skip trailing whitespace when applicable. - try: - char = src[pos] - except IndexError: - break - if char == "\n": - pos += 1 - continue - if char in KEY_INITIAL_CHARS: - pos = key_value_rule(src, pos, out, header, parse_float) - pos = skip_chars(src, pos, TOML_WS) - elif char == "[": - try: - second_char: Optional[str] = src[pos + 1] - except IndexError: - second_char = None - if second_char == "[": - pos, header = create_list_rule(src, pos, out) - else: - pos, header = create_dict_rule(src, pos, out) - pos = skip_chars(src, pos, TOML_WS) - elif char != "#": - raise suffixed_err(src, pos, "Invalid statement") - - # 3. Skip comment - pos = skip_comment(src, pos) - - # 4. Expect end of line or end of file - try: - char = src[pos] - except IndexError: - break - if char != "\n": - raise suffixed_err( - src, pos, "Expected newline or end of document after a statement" - ) - pos += 1 - - return out.data.dict - - -class Flags: - """Flags that map to parsed keys/namespaces.""" - - # Marks an immutable namespace (inline array or inline table). - FROZEN = 0 - # Marks a nest that has been explicitly created and can no longer - # be opened using the "[table]" syntax. 
- EXPLICIT_NEST = 1 - - def __init__(self) -> None: - self._flags: Dict[str, dict] = {} - - def unset_all(self, key: Key) -> None: - cont = self._flags - for k in key[:-1]: - if k not in cont: - return - cont = cont[k]["nested"] - cont.pop(key[-1], None) - - def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None: - cont = self._flags - for k in head_key: - if k not in cont: - cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont = cont[k]["nested"] - for k in rel_key: - if k in cont: - cont[k]["flags"].add(flag) - else: - cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}} - cont = cont[k]["nested"] - - def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 - cont = self._flags - key_parent, key_stem = key[:-1], key[-1] - for k in key_parent: - if k not in cont: - cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont = cont[k]["nested"] - if key_stem not in cont: - cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) - - def is_(self, key: Key, flag: int) -> bool: - if not key: - return False # document root has no flags - cont = self._flags - for k in key[:-1]: - if k not in cont: - return False - inner_cont = cont[k] - if flag in inner_cont["recursive_flags"]: - return True - cont = inner_cont["nested"] - key_stem = key[-1] - if key_stem in cont: - cont = cont[key_stem] - return flag in cont["flags"] or flag in cont["recursive_flags"] - return False - - -class NestedDict: - def __init__(self) -> None: - # The parsed content of the TOML document - self.dict: Dict[str, Any] = {} - - def get_or_create_nest( - self, - key: Key, - *, - access_lists: bool = True, - ) -> dict: - cont: Any = self.dict - for k in key: - if k not in cont: - cont[k] = {} - cont = cont[k] - if access_lists and isinstance(cont, list): - cont = cont[-1] - if not isinstance(cont, dict): - raise KeyError("There is no nest behind this key") - return cont - - def append_nest_to_list(self, key: Key) -> None: - cont = self.get_or_create_nest(key[:-1]) - last_key = key[-1] - if last_key in cont: - list_ = cont[last_key] - try: - list_.append({}) - except AttributeError: - raise KeyError("An object other than list found behind this key") - else: - cont[last_key] = [{}] - - -class Output(NamedTuple): - data: NestedDict - flags: Flags - - -def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: - try: - while src[pos] in chars: - pos += 1 - except IndexError: - pass - return pos - - -def skip_until( - src: str, - pos: Pos, - expect: str, - *, - error_on: FrozenSet[str], - error_on_eof: bool, -) -> Pos: - try: - new_pos = src.index(expect, pos) - except ValueError: - new_pos = len(src) - if error_on_eof: - raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None - - if not error_on.isdisjoint(src[pos:new_pos]): - while src[pos] not in error_on: - pos += 1 - raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") - return new_pos - - -def skip_comment(src: str, pos: Pos) -> Pos: - try: - char: Optional[str] = src[pos] - except IndexError: - char = None - if char == "#": - return skip_until( - src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False - ) - return pos - - -def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: - while True: - pos_before_skip = pos - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - pos = skip_comment(src, pos) - if pos == pos_before_skip: - return pos - - 
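Before the table rules below, it helps to see what the `EXPLICIT_NEST` bookkeeping above actually rejects: declaring the same table twice. A minimal sketch, assuming tomli 1.2.x is installed (the TOML snippet and the printed message are illustrative):

```python
import tomli

# The first [fruit] sets EXPLICIT_NEST on the ('fruit',) key, so the second
# declaration trips the check in create_dict_rule below.
doc = '[fruit]\nname = "apple"\n\n[fruit]\ncolor = "red"\n'
try:
    tomli.loads(doc)
except tomli.TOMLDecodeError as exc:
    print(exc)  # e.g. "Can not declare ('fruit',) twice (at line 4, column 7)"
```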
-def create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]: - pos += 1 # Skip "[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Can not declare {key} twice") - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.get_or_create_nest(key) - except KeyError: - raise suffixed_err(src, pos, "Can not overwrite a value") from None - - if not src.startswith("]", pos): - raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration') - return pos + 1, key - - -def create_list_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]: - pos += 2 # Skip "[[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") - # Free the namespace now that it points to another empty list item... - out.flags.unset_all(key) - # ...but this key precisely is still prohibited from table declaration - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.append_nest_to_list(key) - except KeyError: - raise suffixed_err(src, pos, "Can not overwrite a value") from None - - if not src.startswith("]]", pos): - raise suffixed_err(src, pos, 'Expected "]]" at the end of an array declaration') - return pos + 2, key - - -def key_value_rule( - src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat -) -> Pos: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - abs_key_parent = header + key_parent - - if out.flags.is_(abs_key_parent, Flags.FROZEN): - raise suffixed_err( - src, pos, f"Can not mutate immutable namespace {abs_key_parent}" - ) - # Containers in the relative path can't be opened with the table syntax after this - out.flags.set_for_relative_key(header, key, Flags.EXPLICIT_NEST) - try: - nest = out.data.get_or_create_nest(abs_key_parent) - except KeyError: - raise suffixed_err(src, pos, "Can not overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, "Can not overwrite a value") - # Mark inline table and array namespaces recursively immutable - if isinstance(value, (dict, list)): - out.flags.set(header + key, Flags.FROZEN, recursive=True) - nest[key_stem] = value - return pos - - -def parse_key_value_pair( - src: str, pos: Pos, parse_float: ParseFloat -) -> Tuple[Pos, Key, Any]: - pos, key = parse_key(src, pos) - try: - char: Optional[str] = src[pos] - except IndexError: - char = None - if char != "=": - raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair') - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, value = parse_value(src, pos, parse_float) - return pos, key, value - - -def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]: - pos, key_part = parse_key_part(src, pos) - key: Key = (key_part,) - pos = skip_chars(src, pos, TOML_WS) - while True: - try: - char: Optional[str] = src[pos] - except IndexError: - char = None - if char != ".": - return pos, key - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, key_part = parse_key_part(src, pos) - key += (key_part,) - pos = skip_chars(src, pos, TOML_WS) - - -def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]: - try: - char: Optional[str] = src[pos] - except IndexError: - char = None - if char in BARE_KEY_CHARS: - start_pos = pos - pos = skip_chars(src, pos, BARE_KEY_CHARS) - return pos, 
src[start_pos:pos] - if char == "'": - return parse_literal_str(src, pos) - if char == '"': - return parse_one_line_basic_str(src, pos) - raise suffixed_err(src, pos, "Invalid initial character for a key part") - - -def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]: - pos += 1 - return parse_basic_str(src, pos, multiline=False) - - -def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]: - pos += 1 - array: list = [] - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - while True: - pos, val = parse_value(src, pos, parse_float) - array.append(val) - pos = skip_comments_and_array_ws(src, pos) - - c = src[pos : pos + 1] - if c == "]": - return pos + 1, array - if c != ",": - raise suffixed_err(src, pos, "Unclosed array") - pos += 1 - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - - -def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]: - pos += 1 - nested_dict = NestedDict() - flags = Flags() - - pos = skip_chars(src, pos, TOML_WS) - if src.startswith("}", pos): - return pos + 1, nested_dict.dict - while True: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - if flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") - try: - nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) - except KeyError: - raise suffixed_err(src, pos, "Can not overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") - nest[key_stem] = value - pos = skip_chars(src, pos, TOML_WS) - c = src[pos : pos + 1] - if c == "}": - return pos + 1, nested_dict.dict - if c != ",": - raise suffixed_err(src, pos, "Unclosed inline table") - if isinstance(value, (dict, list)): - flags.set(key, Flags.FROZEN, recursive=True) - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - - -def parse_basic_str_escape( # noqa: C901 - src: str, pos: Pos, *, multiline: bool = False -) -> Tuple[Pos, str]: - escape_id = src[pos : pos + 2] - pos += 2 - if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: - # Skip whitespace until next non-whitespace character or end of - # the doc. Error if non-whitespace is found before newline. 
- if escape_id != "\\\n": - pos = skip_chars(src, pos, TOML_WS) - try: - char = src[pos] - except IndexError: - return pos, "" - if char != "\n": - raise suffixed_err(src, pos, 'Unescaped "\\" in a string') - pos += 1 - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - return pos, "" - if escape_id == "\\u": - return parse_hex_char(src, pos, 4) - if escape_id == "\\U": - return parse_hex_char(src, pos, 8) - try: - return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] - except KeyError: - if len(escape_id) != 2: - raise suffixed_err(src, pos, "Unterminated string") from None - raise suffixed_err(src, pos, 'Unescaped "\\" in a string') from None - - -def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]: - return parse_basic_str_escape(src, pos, multiline=True) - - -def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]: - hex_str = src[pos : pos + hex_len] - if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): - raise suffixed_err(src, pos, "Invalid hex value") - pos += hex_len - hex_int = int(hex_str, 16) - if not is_unicode_scalar_value(hex_int): - raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") - return pos, chr(hex_int) - - -def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]: - pos += 1 # Skip starting apostrophe - start_pos = pos - pos = skip_until( - src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True - ) - return pos + 1, src[start_pos:pos] # Skip ending apostrophe - - -def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]: - pos += 3 - if src.startswith("\n", pos): - pos += 1 - - if literal: - delim = "'" - end_pos = skip_until( - src, - pos, - "'''", - error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, - error_on_eof=True, - ) - result = src[pos:end_pos] - pos = end_pos + 3 - else: - delim = '"' - pos, result = parse_basic_str(src, pos, multiline=True) - - # Add at maximum two extra apostrophes/quotes if the end sequence - # is 4 or 5 chars long instead of just 3. 
- if not src.startswith(delim, pos): - return pos, result - pos += 1 - if not src.startswith(delim, pos): - return pos, result + delim - pos += 1 - return pos, result + (delim * 2) - - -def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]: - if multiline: - error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape_multiline - else: - error_on = ILLEGAL_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape - result = "" - start_pos = pos - while True: - try: - char = src[pos] - except IndexError: - raise suffixed_err(src, pos, "Unterminated string") from None - if char == '"': - if not multiline: - return pos + 1, result + src[start_pos:pos] - if src.startswith('"""', pos): - return pos + 3, result + src[start_pos:pos] - pos += 1 - continue - if char == "\\": - result += src[start_pos:pos] - pos, parsed_escape = parse_escapes(src, pos) - result += parsed_escape - start_pos = pos - continue - if char in error_on: - raise suffixed_err(src, pos, f"Illegal character {char!r}") - pos += 1 - - -def parse_value( # noqa: C901 - src: str, pos: Pos, parse_float: ParseFloat -) -> Tuple[Pos, Any]: - try: - char: Optional[str] = src[pos] - except IndexError: - char = None - - # Basic strings - if char == '"': - if src.startswith('"""', pos): - return parse_multiline_str(src, pos, literal=False) - return parse_one_line_basic_str(src, pos) - - # Literal strings - if char == "'": - if src.startswith("'''", pos): - return parse_multiline_str(src, pos, literal=True) - return parse_literal_str(src, pos) - - # Booleans - if char == "t": - if src.startswith("true", pos): - return pos + 4, True - if char == "f": - if src.startswith("false", pos): - return pos + 5, False - - # Dates and times - datetime_match = RE_DATETIME.match(src, pos) - if datetime_match: - try: - datetime_obj = match_to_datetime(datetime_match) - except ValueError as e: - raise suffixed_err(src, pos, "Invalid date or datetime") from e - return datetime_match.end(), datetime_obj - localtime_match = RE_LOCALTIME.match(src, pos) - if localtime_match: - return localtime_match.end(), match_to_localtime(localtime_match) - - # Integers and "normal" floats. - # The regex will greedily match any type starting with a decimal - # char, so needs to be located after handling of dates and times. 
- number_match = RE_NUMBER.match(src, pos) - if number_match: - return number_match.end(), match_to_number(number_match, parse_float) - - # Arrays - if char == "[": - return parse_array(src, pos, parse_float) - - # Inline tables - if char == "{": - return parse_inline_table(src, pos, parse_float) - - # Special floats - first_three = src[pos : pos + 3] - if first_three in {"inf", "nan"}: - return pos + 3, parse_float(first_three) - first_four = src[pos : pos + 4] - if first_four in {"-inf", "+inf", "-nan", "+nan"}: - return pos + 4, parse_float(first_four) - - raise suffixed_err(src, pos, "Invalid value") - - -def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError: - """Return a `TOMLDecodeError` where error message is suffixed with - coordinates in source.""" - - def coord_repr(src: str, pos: Pos) -> str: - if pos >= len(src): - return "end of document" - line = src.count("\n", 0, pos) + 1 - if line == 1: - column = pos + 1 - else: - column = pos - src.rindex("\n", 0, pos) - return f"line {line}, column {column}" - - return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})") - - -def is_unicode_scalar_value(codepoint: int) -> bool: - return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py deleted file mode 100644 index 912682974..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_re.py +++ /dev/null @@ -1,101 +0,0 @@ -from datetime import date, datetime, time, timedelta, timezone, tzinfo -from functools import lru_cache -import re -from typing import Any, Optional, Union - -from tomli._types import ParseFloat - -# E.g. -# - 00:32:00.999999 -# - 00:32:00 -_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" - -RE_NUMBER = re.compile( - r""" -0 -(?: - x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex - | - b[01](?:_?[01])* # bin - | - o[0-7](?:_?[0-7])* # oct -) -| -[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part -(?P - (?:\.[0-9](?:_?[0-9])*)? # optional fractional part - (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part -) -""", - flags=re.VERBOSE, -) -RE_LOCALTIME = re.compile(_TIME_RE_STR) -RE_DATETIME = re.compile( - fr""" -([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 -(?: - [T ] - {_TIME_RE_STR} - (?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset -)? -""", - flags=re.VERBOSE, -) - - -def match_to_datetime(match: "re.Match") -> Union[datetime, date]: - """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. - - Raises ValueError if the match does not correspond to a valid date - or datetime. 
- """ - ( - year_str, - month_str, - day_str, - hour_str, - minute_str, - sec_str, - micros_str, - zulu_time, - offset_sign_str, - offset_hour_str, - offset_minute_str, - ) = match.groups() - year, month, day = int(year_str), int(month_str), int(day_str) - if hour_str is None: - return date(year, month, day) - hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - if offset_sign_str: - tz: Optional[tzinfo] = cached_tz( - offset_hour_str, offset_minute_str, offset_sign_str - ) - elif zulu_time: - tz = timezone.utc - else: # local date-time - tz = None - return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) - - -@lru_cache(maxsize=None) -def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: - sign = 1 if sign_str == "+" else -1 - return timezone( - timedelta( - hours=sign * int(hour_str), - minutes=sign * int(minute_str), - ) - ) - - -def match_to_localtime(match: "re.Match") -> time: - hour_str, minute_str, sec_str, micros_str = match.groups() - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - return time(int(hour_str), int(minute_str), int(sec_str), micros) - - -def match_to_number(match: "re.Match", parse_float: "ParseFloat") -> Any: - if match.group("floatpart"): - return parse_float(match.group()) - return int(match.group(), 0) diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py b/.eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py deleted file mode 100644 index e37cc8088..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/tomli/_types.py +++ /dev/null @@ -1,6 +0,0 @@ -from typing import Any, Callable, Tuple - -# Type annotations -ParseFloat = Callable[[str], Any] -Key = Tuple[str, ...] -Pos = int diff --git a/.eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed b/.eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed deleted file mode 100644 index 7632ecf77..000000000 --- a/.eggs/tomli-1.2.2-py3.8.egg/tomli/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561 From 07051374fc98a9c5d3c57ef5690b432daacf36fd Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Tue, 18 Jan 2022 11:45:02 -0500 Subject: [PATCH 10/15] Update stonesoup/plotter.py Co-authored-by: Nikki Perree <71126372+nperree-dstl@users.noreply.github.com> --- stonesoup/plotter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/plotter.py b/stonesoup/plotter.py index b6c911350..b3bb11f55 100644 --- a/stonesoup/plotter.py +++ b/stonesoup/plotter.py @@ -214,7 +214,7 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ """Plots track(s) Plots each track generated, generating a legend automatically. If ``uncertainty=True`` - and is being plotted in 2D, error elipses are plotted. If beingp plotted in + and is being plotted in 2D, error elipses are plotted. If being plotted in 3D, uncertainty bars are plotted every :attr:`err_freq` measurement, default plots unceratinty bars at every track step. Tracks are plotted as solid lines with point markers and default colors. 
Uncertainty bars are plotted From d3315ae25b22f39119e9b2f66db400a9cb72e3da Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Tue, 18 Jan 2022 11:45:15 -0500 Subject: [PATCH 11/15] Update stonesoup/plotter.py Co-authored-by: Nikki Perree <71126372+nperree-dstl@users.noreply.github.com> --- stonesoup/plotter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/plotter.py b/stonesoup/plotter.py index b3bb11f55..2012d3e02 100644 --- a/stonesoup/plotter.py +++ b/stonesoup/plotter.py @@ -229,7 +229,7 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ Set of tracks which will be plotted. If not a set, and instead a single :class:`~.Track` type, the argument is modified to be a set to allow for iteration. mapping: list - List of 3 items specifying the mapping of the x, y, and z position + List of items specifying the mapping of the position components of the state space. uncertainty : bool If True, function plots uncertainty bars in x, y, and z. From 47d6ee1c27c4541ce3182e9872b461caf34a0bdf Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Tue, 18 Jan 2022 11:45:29 -0500 Subject: [PATCH 12/15] Update stonesoup/plotter.py Co-authored-by: Nikki Perree <71126372+nperree-dstl@users.noreply.github.com> --- stonesoup/plotter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/plotter.py b/stonesoup/plotter.py index 2012d3e02..f55904e09 100644 --- a/stonesoup/plotter.py +++ b/stonesoup/plotter.py @@ -220,7 +220,7 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ lines with point markers and default colors. Uncertainty bars are plotted with a default color which is the same for all tracks. - Users can change linestyle, color and marker using keyword arguments. Uncertainty bars + Users can change linestyle, color and marker using keyword arguments. Uncertainty ellipses/bars will also be plotted with the user defined colour and any changes will apply to all tracks. Parameters From 7e56c2b7356e511676205030e9bdb0e5f51e7fcd Mon Sep 17 00:00:00 2001 From: PACarniglia <43048394+PACarniglia@users.noreply.github.com> Date: Tue, 18 Jan 2022 11:45:44 -0500 Subject: [PATCH 13/15] Update stonesoup/plotter.py Co-authored-by: Nikki Perree <71126372+nperree-dstl@users.noreply.github.com> --- stonesoup/plotter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/plotter.py b/stonesoup/plotter.py index f55904e09..cab3fb6f8 100644 --- a/stonesoup/plotter.py +++ b/stonesoup/plotter.py @@ -232,7 +232,7 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_ List of items specifying the mapping of the position components of the state space. uncertainty : bool - If True, function plots uncertainty bars in x, y, and z. + If True, function plots uncertainty ellipses or bars. particle : bool If True, function plots particles. 
         track_label: str

From cf6f375146f7cd29249761ed404998a7d7bc7037 Mon Sep 17 00:00:00 2001
From: PACarniglia <43048394+PACarniglia@users.noreply.github.com>
Date: Wed, 26 Jan 2022 13:15:22 -0500
Subject: [PATCH 14/15] Remove seed for plotter track test

As per @sdhiscocks' recommendations
---
 stonesoup/tests/test_plotter.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/stonesoup/tests/test_plotter.py b/stonesoup/tests/test_plotter.py
index e2eb330a9..258dab64c 100644
--- a/stonesoup/tests/test_plotter.py
+++ b/stonesoup/tests/test_plotter.py
@@ -26,8 +26,6 @@
 from stonesoup.types.track import Track
 
-np.random.seed(1991)
-
 start_time = datetime.now()
 transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.005),
                                                           ConstantVelocity(0.005)])
 

From f190be0bb577650db26cb10d4c86b5c790224d4d Mon Sep 17 00:00:00 2001
From: PACarniglia <43048394+PACarniglia@users.noreply.github.com>
Date: Wed, 2 Feb 2022 16:11:31 -0500
Subject: [PATCH 15/15] Shortened line in plotter.py to comply with PEP8

Should rectify failing tests 36/37
---
 stonesoup/plotter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stonesoup/plotter.py b/stonesoup/plotter.py
index cab3fb6f8..9cdc0c807 100644
--- a/stonesoup/plotter.py
+++ b/stonesoup/plotter.py
@@ -220,7 +220,7 @@ def plot_tracks(self, tracks, mapping, uncertainty=False, particle=False, track_
         lines with point markers and default colors. Uncertainty bars are plotted
         with a default color which is the same for all tracks.
 
-        Users can change linestyle, color and marker using keyword arguments. Uncertainty ellipses/bars
+        Users can change linestyle, color and marker using keyword arguments. Uncertainty metrics
         will also be plotted with the user defined colour and any changes will apply to all tracks.
 
         Parameters
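Patches 10-13 and 15 together tighten the `plot_tracks` docstring. A hedged usage sketch of the signature they document; the `track` object is an assumed stonesoup `Track` (not constructed here), and only `Plotter`, `plot_tracks`, and the parameters named in the docstring are taken from the patches:

```python
import matplotlib.pyplot as plt

from stonesoup.plotter import Plotter

# Assume `track` holds states with state vector [x, vx, y, vy]; `mapping`
# selects the position components, and uncertainty=True draws error
# ellipses in 2D (uncertainty bars in 3D, every err_freq-th step).
plotter = Plotter()
plotter.plot_tracks({track}, mapping=[0, 2], uncertainty=True,
                    track_label="Example track")
plt.show()
```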