From b08d041d89bb39bf73a5769c3e7747e33138f72b Mon Sep 17 00:00:00 2001 From: Ethan Stein Date: Thu, 4 May 2023 14:08:10 -0700 Subject: [PATCH 001/105] Adding archive from singer-io --- archive/.github/pull_request_template.md | 11 + archive/CHANGELOG.md | 104 + archive/LICENSE | 620 ++++++ archive/MANIFEST.in | 2 + archive/README.md | 44 + archive/bin/run-a-test.sh | 5 + archive/bin/run-all-tests.sh | 1 + archive/config.sample.json | 9 + archive/setup.cfg | 2 + archive/setup.py | 46 + archive/tap_hubspot/__init__.py | 1306 +++++++++++++ archive/tap_hubspot/schemas/campaigns.json | 91 + archive/tap_hubspot/schemas/companies.json | 11 + .../tap_hubspot/schemas/contact_lists.json | 97 + archive/tap_hubspot/schemas/contacts.json | 201 ++ .../schemas/contacts_by_company.json | 12 + .../tap_hubspot/schemas/deal_pipelines.json | 46 + archive/tap_hubspot/schemas/deals.json | 37 + archive/tap_hubspot/schemas/email_events.json | 118 ++ archive/tap_hubspot/schemas/engagements.json | 179 ++ archive/tap_hubspot/schemas/forms.json | 229 +++ archive/tap_hubspot/schemas/owners.json | 72 + .../schemas/subscription_changes.json | 54 + archive/tap_hubspot/schemas/tickets.json | 138 ++ archive/tap_hubspot/schemas/versions.json | 30 + archive/tap_hubspot/schemas/workflows.json | 48 + archive/tap_hubspot/tests/__init__.py | 0 archive/tap_hubspot/tests/test_bookmarks.py | 62 + archive/tap_hubspot/tests/test_deals.py | 34 + .../tests/test_get_streams_to_sync.py | 44 + archive/tap_hubspot/tests/test_offsets.py | 57 + .../tests/unittests/test_get_start.py | 94 + .../tests/unittests/test_request_timeout.py | 121 ++ .../tests/unittests/test_tickets.py | 147 ++ archive/tap_hubspot/tests/utils.py | 80 + archive/tests/base.py | 390 ++++ archive/tests/client.py | 1679 +++++++++++++++++ archive/tests/client_tester.py | 280 +++ archive/tests/test_hubspot_all_fields.py | 327 ++++ .../tests/test_hubspot_automatic_fields.py | 109 ++ archive/tests/test_hubspot_bookmarks.py | 248 +++ .../tests/test_hubspot_bookmarks_static.py | 127 ++ .../tests/test_hubspot_child_stream_only.py | 88 + archive/tests/test_hubspot_discovery.py | 131 ++ .../tests/test_hubspot_interrupted_sync.py | 142 ++ .../test_hubspot_interrupted_sync_offset.py | 141 ++ archive/tests/test_hubspot_pagination.py | 140 ++ archive/tests/test_hubspot_start_date.py | 179 ++ archive/tests/unittests/test_deals.py | 101 + 49 files changed, 8234 insertions(+) create mode 100644 archive/.github/pull_request_template.md create mode 100644 archive/CHANGELOG.md create mode 100644 archive/LICENSE create mode 100644 archive/MANIFEST.in create mode 100644 archive/README.md create mode 100755 archive/bin/run-a-test.sh create mode 100755 archive/bin/run-all-tests.sh create mode 100644 archive/config.sample.json create mode 100644 archive/setup.cfg create mode 100644 archive/setup.py create mode 100644 archive/tap_hubspot/__init__.py create mode 100644 archive/tap_hubspot/schemas/campaigns.json create mode 100644 archive/tap_hubspot/schemas/companies.json create mode 100644 archive/tap_hubspot/schemas/contact_lists.json create mode 100644 archive/tap_hubspot/schemas/contacts.json create mode 100644 archive/tap_hubspot/schemas/contacts_by_company.json create mode 100644 archive/tap_hubspot/schemas/deal_pipelines.json create mode 100644 archive/tap_hubspot/schemas/deals.json create mode 100644 archive/tap_hubspot/schemas/email_events.json create mode 100644 archive/tap_hubspot/schemas/engagements.json create mode 100644 archive/tap_hubspot/schemas/forms.json create mode 100644 
archive/tap_hubspot/schemas/owners.json create mode 100644 archive/tap_hubspot/schemas/subscription_changes.json create mode 100644 archive/tap_hubspot/schemas/tickets.json create mode 100644 archive/tap_hubspot/schemas/versions.json create mode 100644 archive/tap_hubspot/schemas/workflows.json create mode 100644 archive/tap_hubspot/tests/__init__.py create mode 100644 archive/tap_hubspot/tests/test_bookmarks.py create mode 100644 archive/tap_hubspot/tests/test_deals.py create mode 100644 archive/tap_hubspot/tests/test_get_streams_to_sync.py create mode 100644 archive/tap_hubspot/tests/test_offsets.py create mode 100644 archive/tap_hubspot/tests/unittests/test_get_start.py create mode 100644 archive/tap_hubspot/tests/unittests/test_request_timeout.py create mode 100644 archive/tap_hubspot/tests/unittests/test_tickets.py create mode 100644 archive/tap_hubspot/tests/utils.py create mode 100644 archive/tests/base.py create mode 100644 archive/tests/client.py create mode 100644 archive/tests/client_tester.py create mode 100644 archive/tests/test_hubspot_all_fields.py create mode 100644 archive/tests/test_hubspot_automatic_fields.py create mode 100644 archive/tests/test_hubspot_bookmarks.py create mode 100644 archive/tests/test_hubspot_bookmarks_static.py create mode 100644 archive/tests/test_hubspot_child_stream_only.py create mode 100644 archive/tests/test_hubspot_discovery.py create mode 100644 archive/tests/test_hubspot_interrupted_sync.py create mode 100644 archive/tests/test_hubspot_interrupted_sync_offset.py create mode 100644 archive/tests/test_hubspot_pagination.py create mode 100644 archive/tests/test_hubspot_start_date.py create mode 100644 archive/tests/unittests/test_deals.py diff --git a/archive/.github/pull_request_template.md b/archive/.github/pull_request_template.md new file mode 100644 index 0000000..6e46b00 --- /dev/null +++ b/archive/.github/pull_request_template.md @@ -0,0 +1,11 @@ +# Description of change +(write a short description or paste a link to JIRA) + +# Manual QA steps + - + +# Risks + - + +# Rollback steps + - revert this branch diff --git a/archive/CHANGELOG.md b/archive/CHANGELOG.md new file mode 100644 index 0000000..4077450 --- /dev/null +++ b/archive/CHANGELOG.md @@ -0,0 +1,104 @@ +# Changelog + +## 2.12.1 + * Use sync start time for writing bookmarks [#226](https://github.com/singer-io/tap-hubspot/pull/226) + +## 2.12.0 + * Include properties (default + custom) in tickets stream [#220](https://github.com/singer-io/tap-hubspot/pull/220) + +## 2.11.0 + * Implement new stream - `tickets` [#218](https://github.com/singer-io/tap-hubspot/pull/218) + * Update integration tests for the tickets stream implementation [#219](https://github.com/singer-io/tap-hubspot/pull/219) + +## 2.10.0 + * Updated replication method to INCREMENTAL and replication key to property_hs_lastmodifieddate for deals and companies streams [#195](https://github.com/singer-io/tap-hubspot/pull/195) + * Fixed Pylint errors [#204](https://github.com/singer-io/tap-hubspot/pull/204) + +## 2.9.6 + * Implement Request Timeout [#177](https://github.com/singer-io/tap-hubspot/pull/177) + * Add version timestamp in contacts [#191](https://github.com/singer-io/tap-hubspot/pull/191) + +## 2.9.5 + * Fixes a bug in sending the fields to the v3 Deals endpoint [#145](https://github.com/singer-io/tap-hubspot/pull/145) + +## 2.9.4 + * Reverts #142 [#144](https://github.com/singer-io/tap-hubspot/pull/144) + +## 2.9.3 + * Add support for property_versions
[#142](https://github.com/singer-io/tap-hubspot/pull/142) + +## 2.9.2 + * Change `POST` to V3 Deals to use one non-standard field instead of all fields we want [#139](https://github.com/singer-io/tap-hubspot/pull/139) + * See the pull request for a more detailed explanation + +## 2.9.1 + * Add retry logic to V3 calls [#136](https://github.com/singer-io/tap-hubspot/pull/136) + +## 2.9.0 + * Add fields to Deals stream - `hs_date_entered_*` and `hs_date_exited_*` [#133](https://github.com/singer-io/tap-hubspot/pull/133) + +## 2.8.1 + * Reverts `v2.8.0` back to `v2.7.0` + +## 2.8.0 + * Add fields to Deals stream - `hs_date_entered_*` and `hs_date_exited_*` [#124](https://github.com/singer-io/tap-hubspot/pull/124) + +## 2.7.0 + * Fields nested under `properties` are copied to top level and prepended with `property_` [#107](https://github.com/singer-io/tap-hubspot/pull/107) + +## 2.6.5 + * For `deals` stream, use `includeAllProperties` flag instead of appending all properties to request url [#112](https://github.com/singer-io/tap-hubspot/pull/112) + +## 2.6.4 + * When making `deals` requests, only attach `properties` if selected [#102](https://github.com/singer-io/tap-hubspot/pull/102) + +## 2.6.3 + * Use the metadata library better + +## 2.6.2 + * Revert the revert. Go back to v2.6.0. + +## 2.6.1 + * Revert v2.6.0 to v2.5.2 + +## 2.6.0 + * Replaced `annotated_schema` with Singer `metadata` + * Added integration tests to CircleCI + +## 2.5.2 + * Companies and Engagements have a new pattern to catch records that are updated during a long-running sync. Rather than using a lookback window, the bookmark value will be limited to the `min(current_sync_start, max_bk_seen)` [#98](https://github.com/singer-io/tap-hubspot/pull/98) + +## 2.4.0 + * The owners stream can optionally fetch "inactive owners" [#92](https://github.com/singer-io/tap-hubspot/pull/92) + +## 2.3.0 + * Engagements will now track how long the stream takes to sync, and look back on the next run by that amount to cover potentially missed updates due to asynchronous updates during the previous sync [#91](https://github.com/singer-io/tap-hubspot/pull/91) + +## 2.2.8 + * When resuming an interrupted sync, will now attempt all streams before exiting [#90](https://github.com/singer-io/tap-hubspot/pull/90) + +## 2.2.7 + * Add `delivered`, `forward`, `print`, `reply`, `spamreport` to `campaigns.counters` + +## 2.2.6 + * Change a loop over `dict.items()` to `dict.values()` because the keys returned were not being used [#82](https://github.com/singer-io/tap-hubspot/pull/82) + +## 2.2.5 + * Update version of `requests` to `2.20.0` in response to CVE-2018-18074 + +## 2.2.4 + * Ensure that deal associations are being retrieved if `associations` are selected in the catalog [#79](https://github.com/singer-io/tap-hubspot/pull/79) + +## 2.2.3 + * Scrub the access token from error messages Hubspot returns when there are insufficient permissions [#75](https://github.com/singer-io/tap-hubspot/pull/75) + +## 2.2.2 + * Fix a bug with the 'engagements' stream which requires the 'engagement' field to have automatic inclusion [#74](https://github.com/singer-io/tap-hubspot/pull/74) + +## 2.2.1 + * Fix a bug with the 'inclusion' metadata for replication_key fields [#72](https://github.com/singer-io/tap-hubspot/pull/72) + +## 2.2.0 + * Adds property selection to the tap [#67](https://github.com/singer-io/tap-hubspot/pull/67) + * Removed the keywords stream as it is deprecated
[#68](https://github.com/singer-io/tap-hubspot/pull/68) + * Schema updates [#69](https://github.com/singer-io/tap-hubspot/pull/69) [#70](https://github.com/singer-io/tap-hubspot/pull/70) diff --git a/archive/LICENSE b/archive/LICENSE new file mode 100644 index 0000000..627c3e9 --- /dev/null +++ b/archive/LICENSE @@ -0,0 +1,620 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + \ No newline at end of file diff --git a/archive/MANIFEST.in b/archive/MANIFEST.in new file mode 100644 index 0000000..be81b9f --- /dev/null +++ b/archive/MANIFEST.in @@ -0,0 +1,2 @@ +include LICENSE +include tap_hubspot/schemas/*.json diff --git a/archive/README.md b/archive/README.md new file mode 100644 index 0000000..77c99ac --- /dev/null +++ b/archive/README.md @@ -0,0 +1,44 @@ +# tap-hubspot + +This is a [Singer](https://singer.io) tap that produces JSON-formatted data following the [Singer spec](https://github.com/singer-io/getting-started/blob/master/SPEC.md). 
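As background for the spec reference above: a Singer tap writes newline-delimited JSON messages (`SCHEMA`, `RECORD`, and `STATE`) to stdout. A minimal, illustrative sketch of that output (the stream name, fields, and bookmark values here are placeholders, not actual tap-hubspot output):

```json
{"type": "SCHEMA", "stream": "contacts", "schema": {"properties": {"vid": {"type": "integer"}}}, "key_properties": ["vid"]}
{"type": "RECORD", "stream": "contacts", "record": {"vid": 12345}}
{"type": "STATE", "value": {"bookmarks": {"contacts": {"lastmodifieddate": "2017-01-01T00:00:00Z"}}}}
```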
+ +This tap: +- Pulls raw data from HubSpot's [REST API](http://developers.hubspot.com/docs/overview) +- Extracts the following resources from HubSpot + - [Campaigns](http://developers.hubspot.com/docs/methods/email/get_campaign_data) + - [Companies](http://developers.hubspot.com/docs/methods/companies/get_company) + - [Contacts](https://developers.hubspot.com/docs/methods/contacts/get_contacts) + - [Contact Lists](http://developers.hubspot.com/docs/methods/lists/get_lists) + - [Deals](http://developers.hubspot.com/docs/methods/deals/get_deals_modified) + - [Deal Pipelines](https://developers.hubspot.com/docs/methods/deal-pipelines/get-all-deal-pipelines) + - [Email Events](http://developers.hubspot.com/docs/methods/email/get_events) + - [Engagements](https://developers.hubspot.com/docs/methods/engagements/get-all-engagements) + - [Forms](http://developers.hubspot.com/docs/methods/forms/v2/get_forms) + - [Keywords](http://developers.hubspot.com/docs/methods/keywords/get_keywords) + - [Owners](http://developers.hubspot.com/docs/methods/owners/get_owners) + - [Subscription Changes](http://developers.hubspot.com/docs/methods/email/get_subscriptions_timeline) + - [Workflows](http://developers.hubspot.com/docs/methods/workflows/v3/get_workflows) + - [Tickets](https://developers.hubspot.com/docs/api/crm/tickets) +- Outputs the schema for each resource +- Incrementally pulls data based on the input state + +## Configuration + +This tap requires a `config.json` which specifies details regarding [OAuth 2.0](https://developers.hubspot.com/docs/methods/oauth2/oauth2-overview) authentication, a cutoff date for syncing historical data, an optional `request_timeout` parameter that sets how long each request waits for a response, and an optional flag which controls collection of anonymous usage metrics. See [config.sample.json](config.sample.json) for an example. You may specify an API key instead of OAuth parameters for development purposes, as detailed below. + +To run `tap-hubspot` with the configuration file, use this command: + +```bash +› tap-hubspot -c my-config.json +``` + + +## API Key Authentication (for development) + +As an alternative to OAuth 2.0 authentication during development, you may specify an API key (`HAPIKEY`) to authenticate with the HubSpot API. This should be used only for low-volume development work, as the [HubSpot API Usage Guidelines](https://developers.hubspot.com/apps/api_guidelines) specify that integrations should use OAuth for authentication. + +To use an API key, include a `hapikey` configuration variable in your `config.json` and set it to the value of your HubSpot API key. Any OAuth authentication parameters in your `config.json` **will be ignored** if this key is present!
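For illustration only, a minimal development `config.json` that authenticates with `hapikey` in place of the OAuth fields might look like the sketch below (both values are placeholders; compare [config.sample.json](config.sample.json) for the OAuth variant):

```json
{
    "hapikey": "your-hubspot-api-key",
    "start_date": "2017-01-01T00:00:00Z"
}
```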
+ +--- + +Copyright © 2017 Stitch diff --git a/archive/bin/run-a-test.sh b/archive/bin/run-a-test.sh new file mode 100755 index 0000000..fc7a032 --- /dev/null +++ b/archive/bin/run-a-test.sh @@ -0,0 +1,5 @@ +set -exu +TEST_FILE=$1 +TEST_CLASS=$2 +TEST_NAME=$3 +nosetests tap_hubspot/tests/$TEST_FILE:$TEST_CLASS.$TEST_NAME diff --git a/archive/bin/run-all-tests.sh b/archive/bin/run-all-tests.sh new file mode 100755 index 0000000..9fa1873 --- /dev/null +++ b/archive/bin/run-all-tests.sh @@ -0,0 +1 @@ +nosetests tap_hubspot/tests/ diff --git a/archive/config.sample.json b/archive/config.sample.json new file mode 100644 index 0000000..0a93849 --- /dev/null +++ b/archive/config.sample.json @@ -0,0 +1,9 @@ +{ + "redirect_uri": "https://api.hubspot.com/", + "client_id": 123456789000, + "client_secret": "my_secret", + "refresh_token": "my_token", + "start_date": "2017-01-01T00:00:00Z", + "request_timeout": 300, + "disable_collection": false +} diff --git a/archive/setup.cfg b/archive/setup.cfg new file mode 100644 index 0000000..b88034e --- /dev/null +++ b/archive/setup.cfg @@ -0,0 +1,2 @@ +[metadata] +description-file = README.md diff --git a/archive/setup.py b/archive/setup.py new file mode 100644 index 0000000..8b8be70 --- /dev/null +++ b/archive/setup.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +from setuptools import setup + +setup(name='tap-hubspot', + version='2.12.1', + description='Singer.io tap for extracting data from the HubSpot API', + author='Stitch', + url='http://singer.io', + classifiers=['Programming Language :: Python :: 3 :: Only'], + py_modules=['tap_hubspot'], + install_requires=[ + 'attrs==16.3.0', + 'singer-python==5.13.0', + 'requests==2.20.0', + 'backoff==1.8.0', + 'requests_mock==1.3.0', + ], + extras_require= { + 'dev': [ + 'pylint==2.5.3', + 'nose==1.3.7', + ] + }, + entry_points=''' + [console_scripts] + tap-hubspot=tap_hubspot:main + ''', + packages=['tap_hubspot'], + package_data = { + 'tap_hubspot/schemas': [ + "campaigns.json", + "companies.json", + "contact_lists.json", + "contacts.json", + "deals.json", + "email_events.json", + "forms.json", + "keywords.json", + "owners.json", + "subscription_changes.json", + "workflows.json", + ], + }, + include_package_data=True, +) diff --git a/archive/tap_hubspot/__init__.py b/archive/tap_hubspot/__init__.py new file mode 100644 index 0000000..bd23ff4 --- /dev/null +++ b/archive/tap_hubspot/__init__.py @@ -0,0 +1,1306 @@ +#!/usr/bin/env python3 +import datetime +import pytz +import itertools +import os +import re +import sys +import json +# pylint: disable=import-error +import attr +import backoff +import requests +import singer +import singer.messages +from singer import metrics +from singer import metadata +from singer import utils +from singer import (transform, + UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING, + Transformer, _transform_datetime) + +LOGGER = singer.get_logger() +SESSION = requests.Session() + +REQUEST_TIMEOUT = 300 +class InvalidAuthException(Exception): + pass + +class SourceUnavailableException(Exception): + pass + +class DependencyException(Exception): + pass + +class UriTooLongException(Exception): + pass + +class DataFields: + offset = 'offset' + +class StateFields: + offset = 'offset' + this_stream = 'this_stream' + +BASE_URL = "https://api.hubapi.com" + +CONTACTS_BY_COMPANY = "contacts_by_company" + +DEFAULT_CHUNK_SIZE = 1000 * 60 * 60 * 24 + +V3_PREFIXES = {'hs_date_entered', 'hs_date_exited', 'hs_time_in'} + +CONFIG = { + "access_token": None, + "token_expires": None, + "email_chunk_size": 
DEFAULT_CHUNK_SIZE, + "subscription_chunk_size": DEFAULT_CHUNK_SIZE, + + # in config.json + "redirect_uri": None, + "client_id": None, + "client_secret": None, + "refresh_token": None, + "start_date": None, + "hapikey": None, + "include_inactives": None, +} + +ENDPOINTS = { + "contacts_properties": "/properties/v1/contacts/properties", + "contacts_all": "/contacts/v1/lists/all/contacts/all", + "contacts_recent": "/contacts/v1/lists/recently_updated/contacts/recent", + "contacts_detail": "/contacts/v1/contact/vids/batch/", + + "companies_properties": "/companies/v2/properties", + "companies_all": "/companies/v2/companies/paged", + "companies_recent": "/companies/v2/companies/recent/modified", + "companies_detail": "/companies/v2/companies/{company_id}", + "contacts_by_company": "/companies/v2/companies/{company_id}/vids", + + "deals_properties": "/properties/v1/deals/properties", + "deals_all": "/deals/v1/deal/paged", + "deals_recent": "/deals/v1/deal/recent/modified", + "deals_detail": "/deals/v1/deal/{deal_id}", + + "deals_v3_batch_read": "/crm/v3/objects/deals/batch/read", + "deals_v3_properties": "/crm/v3/properties/deals", + + "deal_pipelines": "/deals/v1/pipelines", + + "campaigns_all": "/email/public/v1/campaigns/by-id", + "campaigns_detail": "/email/public/v1/campaigns/{campaign_id}", + + "engagements_all": "/engagements/v1/engagements/paged", + + "subscription_changes": "/email/public/v1/subscriptions/timeline", + "email_events": "/email/public/v1/events", + "contact_lists": "/contacts/v1/lists", + "forms": "/forms/v2/forms", + "workflows": "/automation/v3/workflows", + "owners": "/owners/v2/owners", + + "tickets_properties": "/crm/v3/properties/tickets", + "tickets": "/crm/v4/objects/tickets", +} + +def get_start(state, tap_stream_id, bookmark_key, older_bookmark_key=None): + """ + If the current bookmark_key is available in the state, then return the bookmark_key value. + If it is not available then check and return the older_bookmark_key in the state for the existing connection. + If none of the keys are available in the state for a particular stream, then return start_date. + + We have made this change because of an update in the replication key of the deals stream. + So, if any existing connections have only older_bookmark_key in the state then tap should utilize that bookmark value. + Then next time, the tap should use the current bookmark value. + """ + current_bookmark = singer.get_bookmark(state, tap_stream_id, bookmark_key) + if current_bookmark is None: + if older_bookmark_key: + previous_bookmark = singer.get_bookmark(state, tap_stream_id, older_bookmark_key) + if previous_bookmark: + return previous_bookmark + + return CONFIG['start_date'] + return current_bookmark + +def get_current_sync_start(state, tap_stream_id): + current_sync_start_value = singer.get_bookmark(state, tap_stream_id, "current_sync_start") + if current_sync_start_value is None: + return current_sync_start_value + return utils.strptime_to_utc(current_sync_start_value) + +def write_current_sync_start(state, tap_stream_id, start): + value = start + if start is not None: + value = utils.strftime(start) + return singer.write_bookmark(state, tap_stream_id, "current_sync_start", value) + +def clean_state(state): + """ Clear deprecated keys out of state. 
""" + for stream, bookmark_map in state.get("bookmarks", {}).items(): + if "last_sync_duration" in bookmark_map: + LOGGER.info("%s - Removing last_sync_duration from state.", stream) + state["bookmarks"][stream].pop("last_sync_duration", None) + +def get_selected_property_fields(catalog, mdata): + + fields = catalog.get("schema").get("properties").keys() + property_field_names = [] + for field in fields: + if "property_" in field: + field_metadata = mdata.get(('properties', field)) + if utils.should_sync_field(field_metadata.get('inclusion'), + field_metadata.get('selected')): + property_field_names.append(field.split("property_", 1)[1]) + return ",".join(property_field_names) + +def get_url(endpoint, **kwargs): + if endpoint not in ENDPOINTS: + raise ValueError("Invalid endpoint {}".format(endpoint)) + + return BASE_URL + ENDPOINTS[endpoint].format(**kwargs) + + +def get_field_type_schema(field_type): + if field_type == "bool": + return {"type": ["null", "boolean"]} + + elif field_type == "datetime": + return {"type": ["null", "string"], + "format": "date-time"} + + elif field_type == "number": + # A value like 'N/A' can be returned for this type, + # so we have to let this be a string sometimes + return {"type": ["null", "number", "string"]} + + else: + return {"type": ["null", "string"]} + +def get_field_schema(field_type, extras=False): + if extras: + return { + "type": "object", + "properties": { + "value": get_field_type_schema(field_type), + "timestamp": get_field_type_schema("datetime"), + "source": get_field_type_schema("string"), + "sourceId": get_field_type_schema("string"), + } + } + else: + return { + "type": "object", + "properties": { + "value": get_field_type_schema(field_type), + } + } + +def parse_custom_schema(entity_name, data): + if entity_name == "tickets": + return { + field['name']: get_field_type_schema(field['type']) + for field in data["results"] + } + + return { + field['name']: get_field_schema(field['type'], entity_name != 'contacts') + for field in data + } + + +def get_custom_schema(entity_name): + return parse_custom_schema(entity_name, request(get_url(entity_name + "_properties")).json()) + +def get_v3_schema(entity_name): + url = get_url("deals_v3_properties") + return parse_custom_schema(entity_name, request(url).json()['results']) + +def get_abs_path(path): + return os.path.join(os.path.dirname(os.path.realpath(__file__)), path) + +def load_associated_company_schema(): + associated_company_schema = load_schema("companies") + #pylint: disable=line-too-long + associated_company_schema['properties']['company-id'] = associated_company_schema['properties'].pop('companyId') + associated_company_schema['properties']['portal-id'] = associated_company_schema['properties'].pop('portalId') + return associated_company_schema + +def load_schema(entity_name): + schema = utils.load_json(get_abs_path('schemas/{}.json'.format(entity_name))) + if entity_name in ["contacts", "companies", "deals", "tickets"]: + custom_schema = get_custom_schema(entity_name) + + schema['properties']['properties'] = { + "type": "object", + "properties": custom_schema, + } + + if entity_name in ["deals"]: + v3_schema = get_v3_schema(entity_name) + for key, value in v3_schema.items(): + if any(prefix in key for prefix in V3_PREFIXES): + custom_schema[key] = value + + # Move properties to top level + custom_schema_top_level = {'property_{}'.format(k): v for k, v in custom_schema.items()} + schema['properties'].update(custom_schema_top_level) + + # Exclude properties_versions field for tickets 
stream, as the versions are not present in
+        # the API response.
+        if entity_name != "tickets":
+            # Make properties_versions selectable and share the same schema.
+            versions_schema = utils.load_json(get_abs_path('schemas/versions.json'))
+            schema['properties']['properties_versions'] = versions_schema
+
+    if entity_name == "contacts":
+        schema['properties']['associated-company'] = load_associated_company_schema()
+
+    return schema
+
+#pylint: disable=invalid-name
+def acquire_access_token_from_refresh_token():
+    payload = {
+        "grant_type": "refresh_token",
+        "redirect_uri": CONFIG['redirect_uri'],
+        "refresh_token": CONFIG['refresh_token'],
+        "client_id": CONFIG['client_id'],
+        "client_secret": CONFIG['client_secret'],
+    }
+
+
+    resp = requests.post(BASE_URL + "/oauth/v1/token", data=payload, timeout=get_request_timeout())
+    if resp.status_code == 403:
+        raise InvalidAuthException(resp.content)
+
+    resp.raise_for_status()
+    auth = resp.json()
+    CONFIG['access_token'] = auth['access_token']
+    CONFIG['refresh_token'] = auth['refresh_token']
+    CONFIG['token_expires'] = (
+        datetime.datetime.utcnow() +
+        datetime.timedelta(seconds=auth['expires_in'] - 600))
+    LOGGER.info("Token refreshed. Expires at %s", CONFIG['token_expires'])
+
+
+def giveup(exc):
+    return exc.response is not None \
+        and 400 <= exc.response.status_code < 500 \
+        and exc.response.status_code != 429
+
+def on_giveup(details):
+    if len(details['args']) == 2:
+        url, params = details['args']
+    else:
+        # request() was called with the url only; unpack it from the args tuple
+        url = details['args'][0]
+        params = {}
+
+    raise Exception("Giving up on request after {} tries with url {} and params {}" \
+        .format(details['tries'], url, params))
+
+URL_SOURCE_RE = re.compile(BASE_URL + r'/(\w+)/')
+
+def parse_source_from_url(url):
+    match = URL_SOURCE_RE.match(url)
+    if match:
+        return match.group(1)
+    return None
+
+def get_params_and_headers(params):
+    """
+    This function makes a params object and headers object based on the
+    authentication values available. If there is a `hapikey` in the config, we
+    need that in `params` and not in the `headers`.
Otherwise, we need to get an + `access_token` to put in the `headers` and not in the `params` + """ + params = params or {} + hapikey = CONFIG['hapikey'] + if hapikey is None: + if CONFIG['token_expires'] is None or CONFIG['token_expires'] < datetime.datetime.utcnow(): + acquire_access_token_from_refresh_token() + headers = {'Authorization': 'Bearer {}'.format(CONFIG['access_token'])} + else: + params['hapikey'] = hapikey + headers = {} + + if 'user_agent' in CONFIG: + headers['User-Agent'] = CONFIG['user_agent'] + + return params, headers + + +# backoff for Timeout error is already included in "requests.exceptions.RequestException" +# as it is a parent class of "Timeout" error +@backoff.on_exception(backoff.constant, + (requests.exceptions.RequestException, + requests.exceptions.HTTPError), + max_tries=5, + jitter=None, + giveup=giveup, + on_giveup=on_giveup, + interval=10) +def request(url, params=None): + + params, headers = get_params_and_headers(params) + + req = requests.Request('GET', url, params=params, headers=headers).prepare() + LOGGER.info("GET %s", req.url) + with metrics.http_request_timer(parse_source_from_url(url)) as timer: + resp = SESSION.send(req, timeout=get_request_timeout()) + timer.tags[metrics.Tag.http_status_code] = resp.status_code + if resp.status_code == 403: + raise SourceUnavailableException(resp.content) + elif resp.status_code == 414: + raise UriTooLongException(resp.content) + resp.raise_for_status() + + return resp +# {"bookmarks" : {"contacts" : { "lastmodifieddate" : "2001-01-01" +# "offset" : {"vidOffset": 1234 +# "timeOffset": "3434434 }} +# "users" : { "timestamp" : "2001-01-01"}} +# "currently_syncing" : "contacts" +# } +# } + +def lift_properties_and_versions(record): + for key, value in record.get('properties', {}).items(): + computed_key = "property_{}".format(key) + record[computed_key] = value + if isinstance(value, dict): + versions = value.get('versions') + if versions: + if not record.get('properties_versions'): + record['properties_versions'] = [] + record['properties_versions'] += versions + return record + +# backoff for Timeout error is already included in "requests.exceptions.RequestException" +# as it is a parent class of "Timeout" error +@backoff.on_exception(backoff.constant, + (requests.exceptions.RequestException, + requests.exceptions.HTTPError), + max_tries=5, + jitter=None, + giveup=giveup, + on_giveup=on_giveup, + interval=10) +def post_search_endpoint(url, data, params=None): + + params, headers = get_params_and_headers(params) + headers['content-type'] = "application/json" + + with metrics.http_request_timer(url) as _: + resp = requests.post( + url=url, + json=data, + params=params, + timeout=get_request_timeout(), + headers=headers + ) + + resp.raise_for_status() + + return resp + +def merge_responses(v1_data, v3_data): + for v1_record in v1_data: + v1_id = v1_record.get('dealId') + for v3_record in v3_data: + v3_id = v3_record.get('id') + if str(v1_id) == v3_id: + v1_record['properties'] = {**v1_record['properties'], + **v3_record['properties']} + +def process_v3_deals_records(v3_data): + """ + This function: + 1. filters out fields that don't contain 'hs_date_entered_*' and + 'hs_date_exited_*' + 2. 
changes a key value pair in `properties` to a key paired to an + object with a key 'value' and the original value + """ + transformed_v3_data = [] + for record in v3_data: + new_properties = {field_name : {'value': field_value} + for field_name, field_value in record['properties'].items() + if any(prefix in field_name for prefix in V3_PREFIXES)} + transformed_v3_data.append({**record, 'properties' : new_properties}) + return transformed_v3_data + +def get_v3_deals(v3_fields, v1_data): + v1_ids = [{'id': str(record['dealId'])} for record in v1_data] + + # Sending the first v3_field is enough to get them all + v3_body = {'inputs': v1_ids, + 'properties': [v3_fields[0]],} + v3_url = get_url('deals_v3_batch_read') + v3_resp = post_search_endpoint(v3_url, v3_body) + return v3_resp.json()['results'] + +#pylint: disable=line-too-long +def gen_request(STATE, tap_stream_id, url, params, path, more_key, offset_keys, offset_targets, v3_fields=None): + if len(offset_keys) != len(offset_targets): + raise ValueError("Number of offset_keys must match number of offset_targets") + + if singer.get_offset(STATE, tap_stream_id): + params.update(singer.get_offset(STATE, tap_stream_id)) + + with metrics.record_counter(tap_stream_id) as counter: + while True: + data = request(url, params).json() + + if data.get(path) is None: + raise RuntimeError("Unexpected API response: {} not in {}".format(path, data.keys())) + + if v3_fields: + v3_data = get_v3_deals(v3_fields, data[path]) + + # The shape of v3_data is different than the V1 response, + # so we transform v3 to look like v1 + transformed_v3_data = process_v3_deals_records(v3_data) + merge_responses(data[path], transformed_v3_data) + + for row in data[path]: + counter.increment() + yield row + + if not data.get(more_key, False): + break + + STATE = singer.clear_offset(STATE, tap_stream_id) + for key, target in zip(offset_keys, offset_targets): + if key in data: + params[target] = data[key] + STATE = singer.set_offset(STATE, tap_stream_id, target, data[key]) + + singer.write_state(STATE) + + STATE = singer.clear_offset(STATE, tap_stream_id) + singer.write_state(STATE) + + +def _sync_contact_vids(catalog, vids, schema, bumble_bee, bookmark_values, bookmark_key): + if len(vids) == 0: + return + + data = request(get_url("contacts_detail"), params={'vid': vids, 'showListMemberships' : True, "formSubmissionMode" : "all"}).json() + time_extracted = utils.now() + mdata = metadata.to_map(catalog.get('metadata')) + for record in data.values(): + # Explicitly add the bookmark field "versionTimestamp" and its value in the record. 
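+        # The value was keyed by vid in sync_contacts and is re-attached here
+        # because the batch detail response omits it. For example, with the
+        # hypothetical entry bookmark_values = {1234: "2023-05-04T14:08:10.000000Z"},
+        # a record with vid 1234 gets that string as its "versionTimestamp".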
+ record[bookmark_key] = bookmark_values.get(record.get("vid")) + record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) + singer.write_record("contacts", record, catalog.get('stream_alias'), time_extracted=time_extracted) + +default_contact_params = { + 'showListMemberships': True, + 'includeVersion': True, + 'count': 100, +} + +def sync_contacts(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + bookmark_key = 'versionTimestamp' + start = utils.strptime_with_tz(get_start(STATE, "contacts", bookmark_key)) + LOGGER.info("sync_contacts from %s", start) + + max_bk_value = start + schema = load_schema("contacts") + + singer.write_schema("contacts", schema, ["vid"], [bookmark_key], catalog.get('stream_alias')) + + url = get_url("contacts_all") + + vids = [] + # Dict to store replication key value for each contact record + bookmark_values = {} + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + # To handle records updated between start of the table sync and the end, + # store the current sync start in the state and not move the bookmark past this value. + sync_start_time = utils.now() + for row in gen_request(STATE, 'contacts', url, default_contact_params, 'contacts', 'has-more', ['vid-offset'], ['vidOffset']): + modified_time = None + if bookmark_key in row: + modified_time = utils.strptime_with_tz( + _transform_datetime( # pylint: disable=protected-access + row[bookmark_key], + UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING)) + + if not modified_time or modified_time >= start: + vids.append(row['vid']) + # Adding replication key value in `bookmark_values` dict + # Here, key is vid(primary key) and value is replication key value. + bookmark_values[row['vid']] = utils.strftime(modified_time) + + if modified_time and modified_time >= max_bk_value: + max_bk_value = modified_time + + if len(vids) == 100: + _sync_contact_vids(catalog, vids, schema, bumble_bee, bookmark_values, bookmark_key) + vids = [] + + _sync_contact_vids(catalog, vids, schema, bumble_bee, bookmark_values, bookmark_key) + + # Don't bookmark past the start of this sync to account for updated records during the sync. + new_bookmark = min(max_bk_value, sync_start_time) + STATE = singer.write_bookmark(STATE, 'contacts', bookmark_key, utils.strftime(new_bookmark)) + singer.write_state(STATE) + return STATE + +class ValidationPredFailed(Exception): + pass + +# companies_recent only supports 10,000 results. 
If there are more than this, +# we'll need to use the companies_all endpoint +def use_recent_companies_endpoint(response): + return response["total"] < 10000 + +default_contacts_by_company_params = {'count' : 100} + +# NB> to do: support stream aliasing and field selection +def _sync_contacts_by_company(STATE, ctx, company_id): + schema = load_schema(CONTACTS_BY_COMPANY) + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + url = get_url("contacts_by_company", company_id=company_id) + path = 'vids' + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + with metrics.record_counter(CONTACTS_BY_COMPANY) as counter: + data = request(url, default_contacts_by_company_params).json() + + if data.get(path) is None: + raise RuntimeError("Unexpected API response: {} not in {}".format(path, data.keys())) + + for row in data[path]: + counter.increment() + record = {'company-id' : company_id, + 'contact-id' : row} + record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) + singer.write_record("contacts_by_company", record, time_extracted=utils.now()) + + return STATE + +default_company_params = { + 'limit': 250, 'properties': ["createdate", "hs_lastmodifieddate"] +} + +def sync_companies(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + bumble_bee = Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) + bookmark_key = 'property_hs_lastmodifieddate' + bookmark_field_in_record = 'hs_lastmodifieddate' + + start = utils.strptime_to_utc(get_start(STATE, "companies", bookmark_key, older_bookmark_key=bookmark_field_in_record)) + LOGGER.info("sync_companies from %s", start) + schema = load_schema('companies') + singer.write_schema("companies", schema, ["companyId"], [bookmark_key], catalog.get('stream_alias')) + + # Because this stream doesn't query by `lastUpdated`, it cycles + # through the data set every time. The issue with this is that there + # is a race condition by which records may be updated between the + # start of this table's sync and the end, causing some updates to not + # be captured, in order to combat this, we must store the current + # sync's start in the state and not move the bookmark past this value. 
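+    # Hypothetical illustration: a sync starting at 2023-05-04T10:00Z that sees a
+    # record modified at 2023-05-04T10:30Z still bookmarks 10:00Z, so the 10:30Z
+    # record is picked up again on the next run. If this run is interrupted, the
+    # persisted current_sync_start is reused on resume instead of utils.now().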
+ current_sync_start = get_current_sync_start(STATE, "companies") or utils.now() + STATE = write_current_sync_start(STATE, "companies", current_sync_start) + singer.write_state(STATE) + + url = get_url("companies_all") + max_bk_value = start + if CONTACTS_BY_COMPANY in ctx.selected_stream_ids: + contacts_by_company_schema = load_schema(CONTACTS_BY_COMPANY) + singer.write_schema("contacts_by_company", contacts_by_company_schema, ["company-id", "contact-id"]) + + with bumble_bee: + for row in gen_request(STATE, 'companies', url, default_company_params, 'companies', 'has-more', ['offset'], ['offset']): + row_properties = row['properties'] + modified_time = None + if bookmark_field_in_record in row_properties: + # Hubspot returns timestamps in millis + timestamp_millis = row_properties[bookmark_field_in_record]['timestamp'] / 1000.0 + modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) + elif 'createdate' in row_properties: + # Hubspot returns timestamps in millis + timestamp_millis = row_properties['createdate']['timestamp'] / 1000.0 + modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) + + if modified_time and modified_time >= max_bk_value: + max_bk_value = modified_time + + if not modified_time or modified_time >= start: + record = request(get_url("companies_detail", company_id=row['companyId'])).json() + record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) + singer.write_record("companies", record, catalog.get('stream_alias'), time_extracted=utils.now()) + if CONTACTS_BY_COMPANY in ctx.selected_stream_ids: + STATE = _sync_contacts_by_company(STATE, ctx, record['companyId']) + + # Don't bookmark past the start of this sync to account for updated records during the sync. + new_bookmark = min(max_bk_value, current_sync_start) + STATE = singer.write_bookmark(STATE, 'companies', bookmark_key, utils.strftime(new_bookmark)) + STATE = write_current_sync_start(STATE, 'companies', None) + singer.write_state(STATE) + return STATE + +def has_selected_custom_field(mdata): + top_level_custom_props = [x for x in mdata if len(x) == 2 and 'property_' in x[1]] + for prop in top_level_custom_props: + # Return 'True' if the custom field is automatic. + if (mdata.get(prop, {}).get('selected') is True) or (mdata.get(prop, {}).get('inclusion') == "automatic"): + return True + return False + +def sync_deals(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + bookmark_key = 'property_hs_lastmodifieddate' + # The Bookmark field('hs_lastmodifieddate') available in the record is different from + # the tap's bookmark key(property_hs_lastmodifieddate). + # `hs_lastmodifieddate` is available in the properties field at the nested level. + # As `hs_lastmodifieddate` is not available at the 1st level it can not be marked as automatic inclusion. + # tap includes all nested fields of the properties field as custom fields in the schema by appending the + # prefix `property_` along with each field. + # That's why bookmark_key is `property_hs_lastmodifieddate` so that we can mark it as automatic inclusion. + + last_modified_date = 'hs_lastmodifieddate' + + # Tap was used to write bookmark using replication key `hs_lastmodifieddate`. + # Now, as the replication key gets changed to "property_hs_lastmodifieddate", `get_start` function would return + # bookmark value of older bookmark key(`hs_lastmodifieddate`) if it is available. 
+ # So, here `older_bookmark_key` is the previous bookmark key that may be available in the state of + # the existing connection. + + start = utils.strptime_with_tz(get_start(STATE, "deals", bookmark_key, older_bookmark_key=last_modified_date)) + max_bk_value = start + LOGGER.info("sync_deals from %s", start) + params = {'limit': 100, + 'includeAssociations': False, + 'properties' : []} + + schema = load_schema("deals") + singer.write_schema("deals", schema, ["dealId"], [bookmark_key], catalog.get('stream_alias')) + + # Check if we should include associations + for key in mdata.keys(): + if 'associations' in key: + assoc_mdata = mdata.get(key) + if (assoc_mdata.get('selected') and assoc_mdata.get('selected') is True): + params['includeAssociations'] = True + + v3_fields = None + has_selected_properties = mdata.get(('properties', 'properties'), {}).get('selected') + if has_selected_properties or has_selected_custom_field(mdata): + # On 2/12/20, hubspot added a lot of additional properties for + # deals, and appending all of them to requests ended up leading to + # 414 (url-too-long) errors. Hubspot recommended we use the + # `includeAllProperties` and `allpropertiesFetchMode` params + # instead. + params['includeAllProperties'] = True + params['allPropertiesFetchMode'] = 'latest_version' + + # Grab selected `hs_date_entered/exited` fields to call the v3 endpoint with + v3_fields = [breadcrumb[1].replace('property_', '') + for breadcrumb, mdata_map in mdata.items() + if breadcrumb + and (mdata_map.get('selected') is True or has_selected_properties) + and any(prefix in breadcrumb[1] for prefix in V3_PREFIXES)] + + url = get_url('deals_all') + + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + # To handle records updated between start of the table sync and the end, + # store the current sync start in the state and not move the bookmark past this value. + sync_start_time = utils.now() + for row in gen_request(STATE, 'deals', url, params, 'deals', "hasMore", ["offset"], ["offset"], v3_fields=v3_fields): + row_properties = row['properties'] + modified_time = None + if last_modified_date in row_properties: + # Hubspot returns timestamps in millis + timestamp_millis = row_properties[last_modified_date]['timestamp'] / 1000.0 + modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) + elif 'createdate' in row_properties: + # Hubspot returns timestamps in millis + timestamp_millis = row_properties['createdate']['timestamp'] / 1000.0 + modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) + if modified_time and modified_time >= max_bk_value: + max_bk_value = modified_time + + if not modified_time or modified_time >= start: + record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) + singer.write_record("deals", record, catalog.get('stream_alias'), time_extracted=utils.now()) + + # Don't bookmark past the start of this sync to account for updated records during the sync. 
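+    # e.g. (hypothetical values) max_bk_value = 2023-05-04T11:00Z with
+    # sync_start_time = 2023-05-04T10:00Z clamps the bookmark to 10:00Z, so the
+    # 11:00Z record is re-synced on the next run rather than skipped.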
+ new_bookmark = min(max_bk_value, sync_start_time) + STATE = singer.write_bookmark(STATE, 'deals', bookmark_key, utils.strftime(new_bookmark)) + singer.write_state(STATE) + return STATE + + +def gen_request_tickets(tap_stream_id, url, params, path, more_key): + """ + Cursor-based API Pagination : Used in tickets stream implementation + """ + with metrics.record_counter(tap_stream_id) as counter: + while True: + data = request(url, params).json() + + if data.get(path) is None: + raise RuntimeError( + "Unexpected API response: {} not in {}".format(path, data.keys())) + + for row in data[path]: + counter.increment() + yield row + + if not data.get(more_key): + break + params['after'] = data.get(more_key).get('next').get('after') + +def sync_tickets(STATE, ctx): + """ + Function to sync `tickets` stream records + """ + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + stream_id = "tickets" + primary_key = "id" + bookmark_key = "updatedAt" + + max_bk_value = bookmark_value = utils.strptime_with_tz( + get_start(STATE, stream_id, bookmark_key)) + LOGGER.info("sync_tickets from %s", bookmark_value) + + params = {'limit': 100, + 'associations': 'contact,company,deals', + 'properties': get_selected_property_fields(catalog, mdata), + 'archived': False + } + + schema = load_schema(stream_id) + singer.write_schema(stream_id, schema, [primary_key], + [bookmark_key], catalog.get('stream_alias')) + + url = get_url(stream_id) + + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as transformer: + # To handle records updated between start of the table sync and the end, + # store the current sync start in the state and not move the bookmark past this value. + sync_start_time = utils.now() + for row in gen_request_tickets(stream_id, url, params, 'results', "paging"): + # parsing the string formatted date to datetime object + modified_time = utils.strptime_to_utc(row[bookmark_key]) + + # Checking the bookmark value is present on the record and it + # is greater than or equal to defined previous bookmark value + if modified_time and modified_time >= bookmark_value: + # transforms the data and filters out the selected fields from the catalog + record = transformer.transform(lift_properties_and_versions(row), schema, mdata) + singer.write_record(stream_id, record, catalog.get( + 'stream_alias'), time_extracted=utils.now()) + if modified_time and modified_time >= max_bk_value: + max_bk_value = modified_time + + # Don't bookmark past the start of this sync to account for updated records during the sync. 
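+    # The clamped bookmark below is persisted via utils.strftime, leaving state
+    # shaped like (hypothetical values):
+    #   {"bookmarks": {"tickets": {"updatedAt": "2023-05-04T10:00:00.000000Z"}}}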
+ new_bookmark = min(max_bk_value, sync_start_time) + STATE = singer.write_bookmark(STATE, stream_id, bookmark_key, utils.strftime(new_bookmark)) + singer.write_state(STATE) + return STATE + + +# NB> no suitable bookmark is available: https://developers.hubspot.com/docs/methods/email/get_campaigns_by_id +def sync_campaigns(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + schema = load_schema("campaigns") + singer.write_schema("campaigns", schema, ["id"], catalog.get('stream_alias')) + LOGGER.info("sync_campaigns(NO bookmarks)") + url = get_url("campaigns_all") + params = {'limit': 500} + + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + for row in gen_request(STATE, 'campaigns', url, params, "campaigns", "hasMore", ["offset"], ["offset"]): + record = request(get_url("campaigns_detail", campaign_id=row['id'])).json() + record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) + singer.write_record("campaigns", record, catalog.get('stream_alias'), time_extracted=utils.now()) + + return STATE + + +def sync_entity_chunked(STATE, catalog, entity_name, key_properties, path): + schema = load_schema(entity_name) + bookmark_key = 'startTimestamp' + + singer.write_schema(entity_name, schema, key_properties, [bookmark_key], catalog.get('stream_alias')) + + start = get_start(STATE, entity_name, bookmark_key) + LOGGER.info("sync_%s from %s", entity_name, start) + + now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) + now_ts = int(now.timestamp() * 1000) + + start_ts = int(utils.strptime_with_tz(start).timestamp() * 1000) + url = get_url(entity_name) + + mdata = metadata.to_map(catalog.get('metadata')) + + if entity_name == 'email_events': + window_size = int(CONFIG['email_chunk_size']) + elif entity_name == 'subscription_changes': + window_size = int(CONFIG['subscription_chunk_size']) + + with metrics.record_counter(entity_name) as counter: + while start_ts < now_ts: + end_ts = start_ts + window_size + params = { + 'startTimestamp': start_ts, + 'endTimestamp': end_ts, + 'limit': 1000, + } + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + while True: + our_offset = singer.get_offset(STATE, entity_name) + if bool(our_offset) and our_offset.get('offset') is not None: + params[StateFields.offset] = our_offset.get('offset') + + data = request(url, params).json() + time_extracted = utils.now() + + if data.get(path) is None: + raise RuntimeError("Unexpected API response: {} not in {}".format(path, data.keys())) + + for row in data[path]: + counter.increment() + record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) + singer.write_record(entity_name, + record, + catalog.get('stream_alias'), + time_extracted=time_extracted) + if data.get('hasMore'): + STATE = singer.set_offset(STATE, entity_name, 'offset', data['offset']) + singer.write_state(STATE) + else: + STATE = singer.clear_offset(STATE, entity_name) + singer.write_state(STATE) + break + STATE = singer.write_bookmark(STATE, entity_name, 'startTimestamp', utils.strftime(datetime.datetime.fromtimestamp((start_ts / 1000), datetime.timezone.utc))) # pylint: disable=line-too-long + singer.write_state(STATE) + start_ts = end_ts + + STATE = singer.clear_offset(STATE, entity_name) + singer.write_state(STATE) + return STATE + +def sync_subscription_changes(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + STATE = 
sync_entity_chunked(STATE, catalog, "subscription_changes", ["timestamp", "portalId", "recipient"], + "timeline") + return STATE + +def sync_email_events(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + STATE = sync_entity_chunked(STATE, catalog, "email_events", ["id"], "events") + return STATE + +def sync_contact_lists(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + schema = load_schema("contact_lists") + bookmark_key = 'updatedAt' + singer.write_schema("contact_lists", schema, ["listId"], [bookmark_key], catalog.get('stream_alias')) + + start = get_start(STATE, "contact_lists", bookmark_key) + max_bk_value = start + + LOGGER.info("sync_contact_lists from %s", start) + + url = get_url("contact_lists") + params = {'count': 250} + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + # To handle records updated between start of the table sync and the end, + # store the current sync start in the state and not move the bookmark past this value. + sync_start_time = utils.now() + for row in gen_request(STATE, 'contact_lists', url, params, "lists", "has-more", ["offset"], ["offset"]): + record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) + + if record[bookmark_key] >= start: + singer.write_record("contact_lists", record, catalog.get('stream_alias'), time_extracted=utils.now()) + if record[bookmark_key] >= max_bk_value: + max_bk_value = record[bookmark_key] + + # Don't bookmark past the start of this sync to account for updated records during the sync. + new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) + STATE = singer.write_bookmark(STATE, 'contact_lists', bookmark_key, utils.strftime(new_bookmark)) + singer.write_state(STATE) + + return STATE + +def sync_forms(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + schema = load_schema("forms") + bookmark_key = 'updatedAt' + + singer.write_schema("forms", schema, ["guid"], [bookmark_key], catalog.get('stream_alias')) + start = get_start(STATE, "forms", bookmark_key) + max_bk_value = start + + LOGGER.info("sync_forms from %s", start) + + data = request(get_url("forms")).json() + time_extracted = utils.now() + + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + # To handle records updated between start of the table sync and the end, + # store the current sync start in the state and not move the bookmark past this value. + sync_start_time = utils.now() + for row in data: + record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) + + if record[bookmark_key] >= start: + singer.write_record("forms", record, catalog.get('stream_alias'), time_extracted=time_extracted) + if record[bookmark_key] >= max_bk_value: + max_bk_value = record[bookmark_key] + + # Don't bookmark past the start of this sync to account for updated records during the sync. 
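+    # max_bk_value is still an ISO 8601 string here; comparing such strings with
+    # >= is safe because they sort lexicographically. It is parsed to a datetime
+    # only now so it can be compared with sync_start_time.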
+ new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) + STATE = singer.write_bookmark(STATE, 'forms', bookmark_key, utils.strftime(new_bookmark)) + singer.write_state(STATE) + + return STATE + +def sync_workflows(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + schema = load_schema("workflows") + bookmark_key = 'updatedAt' + singer.write_schema("workflows", schema, ["id"], [bookmark_key], catalog.get('stream_alias')) + start = get_start(STATE, "workflows", bookmark_key) + max_bk_value = start + + STATE = singer.write_bookmark(STATE, 'workflows', bookmark_key, max_bk_value) + singer.write_state(STATE) + + LOGGER.info("sync_workflows from %s", start) + + data = request(get_url("workflows")).json() + time_extracted = utils.now() + + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + # To handle records updated between start of the table sync and the end, + # store the current sync start in the state and not move the bookmark past this value. + sync_start_time = utils.now() + for row in data['workflows']: + record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) + if record[bookmark_key] >= start: + singer.write_record("workflows", record, catalog.get('stream_alias'), time_extracted=time_extracted) + if record[bookmark_key] >= max_bk_value: + max_bk_value = record[bookmark_key] + + # Don't bookmark past the start of this sync to account for updated records during the sync. + new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) + STATE = singer.write_bookmark(STATE, 'workflows', bookmark_key, utils.strftime(new_bookmark)) + singer.write_state(STATE) + return STATE + +def sync_owners(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + schema = load_schema("owners") + bookmark_key = 'updatedAt' + + singer.write_schema("owners", schema, ["ownerId"], [bookmark_key], catalog.get('stream_alias')) + start = get_start(STATE, "owners", bookmark_key) + max_bk_value = start + + LOGGER.info("sync_owners from %s", start) + + params = {} + if CONFIG.get('include_inactives'): + params['includeInactives'] = "true" + data = request(get_url("owners"), params).json() + + time_extracted = utils.now() + + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + # To handle records updated between start of the table sync and the end, + # store the current sync start in the state and not move the bookmark past this value. + sync_start_time = utils.now() + for row in data: + record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) + if record[bookmark_key] >= max_bk_value: + max_bk_value = record[bookmark_key] + + if record[bookmark_key] >= start: + singer.write_record("owners", record, catalog.get('stream_alias'), time_extracted=time_extracted) + + # Don't bookmark past the start of this sync to account for updated records during the sync. 
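+    # The owners endpoint has no server-side date filter, so every record is
+    # fetched each run; `start` only gates which records are emitted, while
+    # max_bk_value tracks the newest updatedAt seen in the full response.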
+ new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) + STATE = singer.write_bookmark(STATE, 'owners', bookmark_key, utils.strftime(new_bookmark)) + singer.write_state(STATE) + return STATE + +def sync_engagements(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + schema = load_schema("engagements") + bookmark_key = 'lastUpdated' + singer.write_schema("engagements", schema, ["engagement_id"], [bookmark_key], catalog.get('stream_alias')) + start = get_start(STATE, "engagements", bookmark_key) + + # Because this stream doesn't query by `lastUpdated`, it cycles + # through the data set every time. The issue with this is that there + # is a race condition by which records may be updated between the + # start of this table's sync and the end, causing some updates to not + # be captured, in order to combat this, we must store the current + # sync's start in the state and not move the bookmark past this value. + current_sync_start = get_current_sync_start(STATE, "engagements") or utils.now() + STATE = write_current_sync_start(STATE, "engagements", current_sync_start) + singer.write_state(STATE) + + max_bk_value = start + LOGGER.info("sync_engagements from %s", start) + + STATE = singer.write_bookmark(STATE, 'engagements', bookmark_key, start) + singer.write_state(STATE) + + url = get_url("engagements_all") + params = {'limit': 250} + top_level_key = "results" + engagements = gen_request(STATE, 'engagements', url, params, top_level_key, "hasMore", ["offset"], ["offset"]) + + time_extracted = utils.now() + + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + for engagement in engagements: + record = bumble_bee.transform(lift_properties_and_versions(engagement), schema, mdata) + if record['engagement'][bookmark_key] >= start: + # hoist PK and bookmark field to top-level record + record['engagement_id'] = record['engagement']['id'] + record[bookmark_key] = record['engagement'][bookmark_key] + singer.write_record("engagements", record, catalog.get('stream_alias'), time_extracted=time_extracted) + if record['engagement'][bookmark_key] >= max_bk_value: + max_bk_value = record['engagement'][bookmark_key] + + # Don't bookmark past the start of this sync to account for updated records during the sync. 
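+    # Unlike streams that clamp to a local sync start time, engagements clamps to
+    # the persisted current_sync_start, so an interrupted and resumed sync still
+    # bookmarks no later than the original window start; the marker is then
+    # cleared by writing None below.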
+ new_bookmark = min(utils.strptime_to_utc(max_bk_value), current_sync_start) + STATE = singer.write_bookmark(STATE, 'engagements', bookmark_key, utils.strftime(new_bookmark)) + STATE = write_current_sync_start(STATE, 'engagements', None) + singer.write_state(STATE) + return STATE + +def sync_deal_pipelines(STATE, ctx): + catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) + mdata = metadata.to_map(catalog.get('metadata')) + schema = load_schema('deal_pipelines') + singer.write_schema('deal_pipelines', schema, ['pipelineId'], catalog.get('stream_alias')) + LOGGER.info('sync_deal_pipelines') + data = request(get_url('deal_pipelines')).json() + with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: + for row in data: + record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) + singer.write_record("deal_pipelines", record, catalog.get('stream_alias'), time_extracted=utils.now()) + singer.write_state(STATE) + return STATE + +@attr.s +class Stream: + tap_stream_id = attr.ib() + sync = attr.ib() + key_properties = attr.ib() + replication_key = attr.ib() + replication_method = attr.ib() + +STREAMS = [ + # Do these first as they are incremental + Stream('subscription_changes', sync_subscription_changes, ['timestamp', 'portalId', 'recipient'], 'startTimestamp', 'INCREMENTAL'), + Stream('email_events', sync_email_events, ['id'], 'startTimestamp', 'INCREMENTAL'), + Stream('contacts', sync_contacts, ["vid"], 'versionTimestamp', 'INCREMENTAL'), + Stream('deals', sync_deals, ["dealId"], 'property_hs_lastmodifieddate', 'INCREMENTAL'), + Stream('companies', sync_companies, ["companyId"], 'property_hs_lastmodifieddate', 'INCREMENTAL'), + Stream('tickets', sync_tickets, ['id'], 'updatedAt', 'INCREMENTAL'), + + # Do these last as they are full table + Stream('forms', sync_forms, ['guid'], 'updatedAt', 'FULL_TABLE'), + Stream('workflows', sync_workflows, ['id'], 'updatedAt', 'FULL_TABLE'), + Stream('owners', sync_owners, ["ownerId"], 'updatedAt', 'FULL_TABLE'), + Stream('campaigns', sync_campaigns, ["id"], None, 'FULL_TABLE'), + Stream('contact_lists', sync_contact_lists, ["listId"], 'updatedAt', 'FULL_TABLE'), + Stream('deal_pipelines', sync_deal_pipelines, ['pipelineId'], None, 'FULL_TABLE'), + Stream('engagements', sync_engagements, ["engagement_id"], 'lastUpdated', 'FULL_TABLE') +] + +def get_streams_to_sync(streams, state): + target_stream = singer.get_currently_syncing(state) + result = streams + if target_stream: + skipped = list(itertools.takewhile( + lambda x: x.tap_stream_id != target_stream, streams)) + rest = list(itertools.dropwhile( + lambda x: x.tap_stream_id != target_stream, streams)) + result = rest + skipped # Move skipped streams to end + if not result: + raise Exception('Unknown stream {} in state'.format(target_stream)) + return result + +def get_selected_streams(remaining_streams, ctx): + selected_streams = [] + for stream in remaining_streams: + if stream.tap_stream_id in ctx.selected_stream_ids: + selected_streams.append(stream) + return selected_streams + +def do_sync(STATE, catalog): + # Clear out keys that are no longer used + clean_state(STATE) + + ctx = Context(catalog) + validate_dependencies(ctx) + + remaining_streams = get_streams_to_sync(STREAMS, STATE) + selected_streams = get_selected_streams(remaining_streams, ctx) + LOGGER.info('Starting sync. 
Will sync these streams: %s', + [stream.tap_stream_id for stream in selected_streams]) + for stream in selected_streams: + LOGGER.info('Syncing %s', stream.tap_stream_id) + STATE = singer.set_currently_syncing(STATE, stream.tap_stream_id) + singer.write_state(STATE) + + try: + STATE = stream.sync(STATE, ctx) # pylint: disable=not-callable + except SourceUnavailableException as ex: + error_message = str(ex).replace(CONFIG['access_token'], 10 * '*') + LOGGER.error(error_message) + except UriTooLongException as ex: + LOGGER.fatal(f"For stream - {stream.tap_stream_id}, please select fewer fields. " + f"The current selection exceeds Hubspot's maximum character allowance.") + raise ex + STATE = singer.set_currently_syncing(STATE, None) + singer.write_state(STATE) + LOGGER.info("Sync completed") + +class Context: + def __init__(self, catalog): + self.selected_stream_ids = set() + + for stream in catalog.get('streams'): + mdata = metadata.to_map(stream['metadata']) + if metadata.get(mdata, (), 'selected'): + self.selected_stream_ids.add(stream['tap_stream_id']) + + self.catalog = catalog + + def get_catalog_from_id(self, tap_stream_id): + return [c for c in self.catalog.get('streams') if c.get('stream') == tap_stream_id][0] + +# stream a is dependent on stream STREAM_DEPENDENCIES[a] +STREAM_DEPENDENCIES = { + CONTACTS_BY_COMPANY: 'companies' +} + +def validate_dependencies(ctx): + errs = [] + msg_tmpl = ("Unable to extract {0} data. " + "To receive {0} data, you also need to select {1}.") + + for k, v in STREAM_DEPENDENCIES.items(): + if k in ctx.selected_stream_ids and v not in ctx.selected_stream_ids: + errs.append(msg_tmpl.format(k, v)) + if errs: + raise DependencyException(" ".join(errs)) + +def load_discovered_schema(stream): + schema = load_schema(stream.tap_stream_id) + mdata = metadata.new() + + mdata = metadata.write(mdata, (), 'table-key-properties', stream.key_properties) + mdata = metadata.write(mdata, (), 'forced-replication-method', stream.replication_method) + + if stream.replication_key: + mdata = metadata.write(mdata, (), 'valid-replication-keys', [stream.replication_key]) + + for field_name in schema['properties'].keys(): + if field_name in stream.key_properties or field_name == stream.replication_key: + mdata = metadata.write(mdata, ('properties', field_name), 'inclusion', 'automatic') + else: + mdata = metadata.write(mdata, ('properties', field_name), 'inclusion', 'available') + + # The engagements stream has nested data that we synthesize; The engagement field needs to be automatic + if stream.tap_stream_id == "engagements": + mdata = metadata.write(mdata, ('properties', 'engagement'), 'inclusion', 'automatic') + mdata = metadata.write(mdata, ('properties', 'lastUpdated'), 'inclusion', 'automatic') + + return schema, metadata.to_list(mdata) + +def discover_schemas(): + result = {'streams': []} + for stream in STREAMS: + LOGGER.info('Loading schema for %s', stream.tap_stream_id) + try: + schema, mdata = load_discovered_schema(stream) + result['streams'].append({'stream': stream.tap_stream_id, + 'tap_stream_id': stream.tap_stream_id, + 'schema': schema, + 'metadata': mdata}) + except SourceUnavailableException as ex: + # Skip the discovery mode on the streams were the required scopes are missing + warning_message = str(ex).replace(CONFIG['access_token'], 10 * '*') + LOGGER.warning(warning_message) + # Load the contacts_by_company schema + LOGGER.info('Loading schema for contacts_by_company') + contacts_by_company = Stream('contacts_by_company', _sync_contacts_by_company, 
['company-id', 'contact-id'], None, 'FULL_TABLE') + schema, mdata = load_discovered_schema(contacts_by_company) + + result['streams'].append({'stream': CONTACTS_BY_COMPANY, + 'tap_stream_id': CONTACTS_BY_COMPANY, + 'schema': schema, + 'metadata': mdata}) + + return result + +def do_discover(): + LOGGER.info('Loading schemas') + json.dump(discover_schemas(), sys.stdout, indent=4) + +def get_request_timeout(): + # Get `request_timeout` value from config. + config_request_timeout = CONFIG.get('request_timeout') + # if config request_timeout is other than 0, "0" or "" then use request_timeout + if config_request_timeout and float(config_request_timeout): + request_timeout = float(config_request_timeout) + else: + # If value is 0, "0", "" or not passed then it set default to 300 seconds. + request_timeout = REQUEST_TIMEOUT + return request_timeout + +def main_impl(): + args = utils.parse_args( + ["redirect_uri", + "client_id", + "client_secret", + "refresh_token", + "start_date"]) + + CONFIG.update(args.config) + STATE = {} + + if args.state: + STATE.update(args.state) + + if args.discover: + do_discover() + elif args.properties: + do_sync(STATE, args.properties) + else: + LOGGER.info("No properties were selected") + +def main(): + try: + main_impl() + except Exception as exc: + LOGGER.critical(exc) + raise exc + +if __name__ == '__main__': + main() diff --git a/archive/tap_hubspot/schemas/campaigns.json b/archive/tap_hubspot/schemas/campaigns.json new file mode 100644 index 0000000..29797da --- /dev/null +++ b/archive/tap_hubspot/schemas/campaigns.json @@ -0,0 +1,91 @@ +{ + "type": "object", + "properties": { + "appId": { + "type": ["null", "integer"] + }, + "appName": { + "type": ["null", "string"] + }, + "contentId": { + "type": ["null", "integer"] + }, + "counters": { + "type": ["null", "object"], + "properties": { + "delievered": { + "type": ["null", "integer"] + }, + "open": { + "type": ["null", "integer"] + }, + "processed": { + "type": ["null", "integer"] + }, + "sent": { + "type": ["null", "integer"] + }, + "deferred": { + "type": ["null", "integer"] + }, + "unsubscribed": { + "type": ["null", "integer"] + }, + "statuschange": { + "type": ["null", "integer"] + }, + "bounce": { + "type": ["null", "integer"] + }, + "mta_dropped": { + "type": ["null", "integer"] + }, + "dropped": { + "type": ["null", "integer"] + }, + "suppressed": { + "type": ["null", "integer"] + }, + "click": { + "type": ["null", "integer"] + }, + "delivered": { + "type": ["null", "integer"] + }, + "forward": { + "type": ["null", "integer"] + }, + "print": { + "type": ["null", "integer"] + }, + "reply": { + "type": ["null", "integer"] + }, + "spamreport": { + "type": ["null", "integer"] + } + } + }, + "id": { + "type": ["null", "integer"] + }, + "name": { + "type": ["null", "string"] + }, + "numIncluded": { + "type": ["null", "integer"] + }, + "numQueued": { + "type": ["null", "integer"] + }, + "subType": { + "type": ["null", "string"] + }, + "subject": { + "type": ["null", "string"] + }, + "type": { + "type": ["null", "string"] + } + } +} diff --git a/archive/tap_hubspot/schemas/companies.json b/archive/tap_hubspot/schemas/companies.json new file mode 100644 index 0000000..286f249 --- /dev/null +++ b/archive/tap_hubspot/schemas/companies.json @@ -0,0 +1,11 @@ +{ + "type": "object", + "properties": { + "portalId": { + "type": ["null", "integer"] + }, + "companyId": { + "type": ["null", "integer"] + } + } +} diff --git a/archive/tap_hubspot/schemas/contact_lists.json b/archive/tap_hubspot/schemas/contact_lists.json new 
file mode 100644 index 0000000..d3ad2ae --- /dev/null +++ b/archive/tap_hubspot/schemas/contact_lists.json @@ -0,0 +1,97 @@ +{ + "type": "object", + "properties": { + "parentId": { + "type": ["null", "integer"] + }, + "metaData": { + "type": "object", + "properties": { + "processing": { + "type": ["null", "string"] + }, + "size": { + "type": ["null", "integer"] + }, + "error": { + "type": ["null", "string"] + }, + "lastProcessingStateChangeAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "lastSizeChangeAt": { + "type": ["null", "string"], + "format": "date-time" + } + } + }, + "dynamic": { + "type": ["null", "boolean"] + }, + "name": { + "type": ["null", "string"] + }, + "filters": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "object", + "properties": { + "filterFamily": { + "type": ["null", "string"] + }, + "withinTimeMode": { + "type": ["null", "string"] + }, + "checkPastVersions": { + "type": ["null", "boolean"] + }, + "type": { + "type": ["null", "string"] + }, + "property": { + "type": ["null", "string"] + }, + "value": { + "type": ["null", "string"] + }, + "operator": { + "type": ["null", "string"] + } + } + } + } + }, + "portalId": { + "type": ["null", "integer"] + }, + "createdAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "listId": { + "type": ["null", "integer"] + }, + "updatedAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "internalListId": { + "type": ["null", "integer"] + }, + "readOnly": { + "type": ["null", "boolean"] + }, + "deleteable": { + "type": ["null", "boolean"] + }, + "listType": { + "type": ["null", "string"] + }, + "archived": { + "type": ["null", "boolean"] + } + } +} diff --git a/archive/tap_hubspot/schemas/contacts.json b/archive/tap_hubspot/schemas/contacts.json new file mode 100644 index 0000000..35e610f --- /dev/null +++ b/archive/tap_hubspot/schemas/contacts.json @@ -0,0 +1,201 @@ +{ + "type": "object", + "properties": { + "vid": { + "type": ["null", "integer"] + }, + "versionTimestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "canonical-vid": { + "type": ["null", "integer"] + }, + "merged-vids": { + "type": ["null", "array"], + "items": { + "type": ["null", "integer"] + } + }, + "portal-id": { + "type": ["null", "integer"] + }, + "is-contact": { + "type": ["null", "boolean"] + }, + "profile-token": { + "type": ["null", "string"] + }, + "profile-url": { + "type": ["null", "string"] + }, + "associated-company" : { + "type": ["null", "object"], + "properties" : {} + }, + "identity-profiles": { + "type": ["null", "array"], + "items": { + "type": ["null", "object"], + "properties": { + "deleted-changed-timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "saved-at-timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "vid": { + "type": ["null", "integer"] + }, + "identities": { + "type": ["null", "array"], + "items": { + "type": ["null", "object"], + "properties": { + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "type": { + "type": ["null", "string"] + }, + "value": { + "type": ["null", "string"] + } + } + } + } + } + } + }, + "list-memberships": { + "type": ["null", "array"], + "items": { + "type": ["null", "object"], + "properties": { + "internal-list-id": { + "type": ["null", "integer"] + }, + "is-member": { + "type": ["null", "boolean"] + }, + "static-list-id": { + "type": ["null", "integer"] + }, + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, 
+ "vid": { + "type": ["null", "integer"] + } + } + } + }, + "form-submissions": { + "type": ["null", "array"], + "items": { + "type": ["null", "object"], + "properties": { + "conversion-id": { + "type": ["null", "string"] + }, + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "form-id": { + "type": ["null", "string"] + }, + "portal-id": { + "type": ["null", "integer"] + }, + "page-url": { + "type": ["null", "string"] + }, + "title": { + "type": ["null", "string"] + } + } + } + }, + "merge-audits": { + "type": ["null", "array"], + "items": { + "type": ["null", "object"], + "properties": { + "canonical-vid": { + "type": ["null", "integer"] + }, + "vid-to-merge": { + "type": ["null", "integer"] + }, + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "user-id": { + "type": ["null", "integer"] + }, + "num-properties-moved": { + "type": ["null", "integer"] + }, + "merged_from_email": { + "type": ["null", "object"], + "properties": { + "value": { + "type": ["null", "string"] + }, + "source-type": { + "type": ["null", "string"] + }, + "source-id": { + "type": ["null", "string"] + }, + "source-label": { + "type": ["null", "string"] + }, + "source-vids": { + "type": ["null", "array"], + "items": { + "type": ["null", "integer"] + } + }, + "timestamp": { + "type": ["null", "integer"] + }, + "selected": { + "type": ["null", "boolean"] + } + } + }, + "merged_to_email": { + "type": ["null", "object"], + "properties": { + "value": { + "type": ["null", "string"] + }, + "source-type": { + "type": ["null", "string"] + }, + "source-id": { + "type": ["null", "string"] + }, + "source-label": { + "type": ["null", "string"] + }, + "timestamp": { + "type": ["null", "integer"] + }, + "selected": { + "type": ["null", "boolean"] + } + } + } + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/contacts_by_company.json b/archive/tap_hubspot/schemas/contacts_by_company.json new file mode 100644 index 0000000..dafd30b --- /dev/null +++ b/archive/tap_hubspot/schemas/contacts_by_company.json @@ -0,0 +1,12 @@ +{ + "type": "object", + "properties": { + "contact-id": { + "type": ["integer"] + }, + "company-id": { + "type": ["integer"] + } + }, + "additionalProperties": false +} diff --git a/archive/tap_hubspot/schemas/deal_pipelines.json b/archive/tap_hubspot/schemas/deal_pipelines.json new file mode 100644 index 0000000..e23a644 --- /dev/null +++ b/archive/tap_hubspot/schemas/deal_pipelines.json @@ -0,0 +1,46 @@ +{ + "type": "object", + "properties": { + "pipelineId": { + "type": ["null", "string"] + }, + "stages": { + "type": ["null", "array"], + "items": { + "type": "object", + "properties": { + "stageId": { + "type": ["null", "string"] + }, + "label": { + "type": ["null", "string"] + }, + "probability": { + "type": ["null", "number"] + }, + "active": { + "type": ["null", "boolean"] + }, + "displayOrder": { + "type": ["null", "integer"] + }, + "closedWon": { + "type": ["null", "boolean"] + } + } + } + }, + "label": { + "type": ["null", "string"] + }, + "active": { + "type": ["null", "boolean"] + }, + "displayOrder": { + "type": ["null", "integer"] + }, + "staticDefault": { + "type": ["null", "boolean"] + } + } +} diff --git a/archive/tap_hubspot/schemas/deals.json b/archive/tap_hubspot/schemas/deals.json new file mode 100644 index 0000000..a6cda1d --- /dev/null +++ b/archive/tap_hubspot/schemas/deals.json @@ -0,0 +1,37 @@ +{ + "type": "object", + "properties": { + "portalId": { + "type": ["null", "integer"] + }, + "dealId": { + "type": ["null", "integer"] + }, + 
"isDeleted": { + "type": ["null", "boolean"] + }, + "associations": { + "type": ["null", "object"], + "properties": { + "associatedVids": { + "type": ["null", "array"], + "items": { + "type": ["null", "integer"] + } + }, + "associatedCompanyIds": { + "type": ["null", "array"], + "items": { + "type": ["null", "integer"] + } + }, + "associatedDealIds": { + "type": ["null", "array"], + "items": { + "type": ["null", "integer"] + } + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/email_events.json b/archive/tap_hubspot/schemas/email_events.json new file mode 100644 index 0000000..e74aa07 --- /dev/null +++ b/archive/tap_hubspot/schemas/email_events.json @@ -0,0 +1,118 @@ +{ + "type": "object", + "properties": { + "appId": { + "type": ["null", "integer"] + }, + "appName": { + "type": ["null", "string"] + }, + "browser": { + "type": ["null", "object"], + "properties": { + "family": { + "type": ["null", "string"] + }, + "name": { + "type": ["null", "string"] + }, + "producer": { + "type": ["null", "string"] + }, + "producerUrl": { + "type": ["null", "string"] + }, + "type": { + "type": ["null", "string"] + }, + "url": { + "type": ["null", "string"] + } + } + }, + "created": { + "type": ["null", "string"], + "format": "date-time" + }, + "deviceType": { + "type": ["null", "string"] + }, + "duration": { + "type": ["null", "integer"] + }, + "emailCampaignId": { + "type": ["null", "integer"] + }, + "emailCampaignGroupId": { + "type": ["null", "integer"] + }, + "filteredEvent": { + "type": ["null", "boolean"] + }, + "from": { + "type": ["null", "string"] + }, + "hmid": { + "type": ["null", "string"] + }, + "id": { + "type": ["null", "string"] + }, + "ipAddress": { + "type": ["null", "string"] + }, + "linkId": { + "type": ["null", "integer"] + }, + "location": { + "type": ["null", "object"], + "properties": { + "city": { + "type": ["null", "string"] + }, + "country": { + "type": ["null", "string"] + }, + "state": { + "type": ["null", "string"] + } + } + }, + "portalId": { + "type": ["null", "integer"] + }, + "recipient": { + "type": ["null", "string"] + }, + "response": { + "type": ["null", "string"] + }, + "sentBy": { + "type": ["null", "object"], + "properties": { + "created": { + "type": ["null", "string"], + "format": "date-time" + }, + "id": { + "type": ["null", "string"] + } + } + }, + "smtpId": { + "type": ["null", "string"] + }, + "subject": { + "type": ["null", "string"] + }, + "type": { + "type": ["null", "string"] + }, + "url": { + "type": ["null", "string"] + }, + "userAgent": { + "type": ["null", "string"] + } + } +} diff --git a/archive/tap_hubspot/schemas/engagements.json b/archive/tap_hubspot/schemas/engagements.json new file mode 100644 index 0000000..71be960 --- /dev/null +++ b/archive/tap_hubspot/schemas/engagements.json @@ -0,0 +1,179 @@ +{ + "type": "object", + "properties": { + "engagement_id": { + "type": "integer" + }, + "lastUpdated": { + "type": ["null", "string"], + "format": "date-time" + }, + "engagement": { + "type": "object", + "properties": { + "id": { + "type": "integer" + }, + "portalId": { + "type": "integer" + }, + "active": { + "type": "boolean" + }, + "createdAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "lastUpdated": { + "type": ["null", "string"], + "format": "date-time" + }, + "ownerId": { + "type": "integer" + }, + "type": { + "type": "string" + }, + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + } + } + }, + "associations": { + "type": ["null", "object"], + "properties": { + "contactIds": { + "type": 
["null", "array"], + "items": { + "type": "integer" + } + }, + "companyIds": { + "type": ["null", "array"], + "items": { + "type": "integer" + } + }, + "dealIds": { + "type": ["null", "array"], + "items": { + "type": "integer" + } + } + } + }, + "attachments": { + "type": ["null", "array"], + "items": { + "type": "object", + "properties": { + "id": { + "type": "integer" + } + } + } + }, + "metadata": { + "type": ["null", "object"], + "properties": { + "body": { + "type": ["null", "string"] + }, + "from": { + "type": ["null", "object"], + "properties": { + "email": { + "type": "string" + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + } + } + }, + "to": { + "type": ["null", "array"], + "items": { + "type": "object", + "properties": { + "email": { + "type": "string" + } + } + } + }, + "cc": { + "type": ["null", "array"], + "items": { + "type": "object", + "properties": { + "email": { + "type": "string" + } + } + } + }, + "bcc": { + "type": ["null", "array"], + "items": { + "type": "object", + "properties": { + "email": { + "type": "string" + } + } + } + }, + "subject": { + "type": ["null", "string"] + }, + "html": { + "type": ["null", "string"] + }, + "text": { + "type": ["null", "string"] + }, + "status": { + "type": ["null", "string"] + }, + "forObjectType": { + "type": ["null", "string"] + }, + "startTime": { + "type": ["null", "integer"] + }, + "endTime": { + "type": ["null", "integer"] + }, + "title": { + "type": ["null", "string"] + }, + "toNumber": { + "type": ["null", "string"] + }, + "fromNumber": { + "type": ["null", "string"] + }, + "externalId": { + "type": ["null", "string"] + }, + "durationMilliseconds": { + "type": ["null", "integer"] + }, + "externalAccountId": { + "type": ["null", "string"] + }, + "recordingUrl": { + "type": ["null", "string"], + "format": "uri" + }, + "disposition": { + "type": ["null", "string"] + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/forms.json b/archive/tap_hubspot/schemas/forms.json new file mode 100644 index 0000000..61fcaa9 --- /dev/null +++ b/archive/tap_hubspot/schemas/forms.json @@ -0,0 +1,229 @@ +{ + "type": "object", + "properties": { + "deletedAt": { + "type": ["null", "integer"] + }, + "portalId": { + "type": ["null", "integer"] + }, + "guid": { + "type": ["null", "string"] + }, + "name": { + "type": ["null", "string"] + }, + "action": { + "type": ["null", "string"] + }, + "method": { + "type": ["null", "string"] + }, + "cssClass": { + "type": ["null", "string"] + }, + "redirect": { + "type": ["null", "string"] + }, + "submitText": { + "type": ["null", "string"] + }, + "followUpId": { + "type": ["null", "string"] + }, + "notifyRecipients": { + "type": ["null", "string"] + }, + "leadNurturingCampaignId": { + "type": ["null", "string"] + }, + "formFieldGroups": { + "type": "array", + "items": { + "type": "object", + "properties": { + "fields": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": ["null", "string"] + }, + "label": { + "type": ["null", "string"] + }, + "type": { + "type": ["null", "string"] + }, + "fieldType": { + "type": ["null", "string"] + }, + "description": { + "type": ["null", "string"] + }, + "groupName": { + "type": ["null", "string"] + }, + "displayOrder": { + "type": ["null", "integer"] + }, + "required": { + "type": ["null", "boolean"] + }, + "validation": { + "type": "object", + "properties": { + "name": { + "type": ["null", "string"] + }, + "message": { + "type": ["null", "string"] + }, + "data": { + "type": ["null", "string"] 
+ }, + "useDefaultBlockList": { + "type": ["null", "boolean"] + }, + "blockedEmailAddresses": { + "type": "array", + "items": { + "type": ["null", "string"] + } + } + } + }, + "enabled": { + "type": ["null", "boolean"] + }, + "hidden": { + "type": ["null", "boolean"] + }, + "defaultValue": { + "type": ["null", "string"] + }, + "isSmartField": { + "type": ["null", "boolean"] + }, + "unselectedLabel": { + "type": ["null", "string"] + }, + "placeholder": { + "type": ["null", "string"] + }, + "labelHidden": { + "type": ["null", "boolean"] + }, + "options": { + "type": "array", + "items": { + "type": "object", + "properties": { + "description": { + "type": ["null", "string"] + }, + "displayOrder": { + "type": ["null", "integer"] + }, + "doubleData": { + "type": ["null", "number"] + }, + "hidden" : { + "type": ["null", "boolean"] + }, + "label": { + "type": ["null", "string"] + }, + "readOnly": { + "type": ["null", "boolean"] + }, + "value": { + "type": ["null", "string"] + } + } + } + }, + "selectedOptions": { + "type": "array", + "items": { + "type" : ["null", "string"] + } + } + } + } + }, + "default": { + "type": ["null", "boolean"] + }, + "isSmartGroup": { + "type": ["null", "boolean"] + }, + "richText": { + "type": "object", + "properties": { + "content": { + "type": ["null", "string"] + } + } + } + } + } + }, + "createdAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "updatedAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "performableHtml": { + "type": ["null", "string"] + }, + "migratedFrom": { + "type": ["null", "string"] + }, + "ignoreCurrentValues": { + "type": ["null", "boolean"] + }, + "deletable": { + "type": ["null", "boolean"] + }, + "inlineMessage": { + "type": ["null", "string"] + }, + "tmsId": { + "type": ["null", "string"] + }, + "captchaEnabled": { + "type": ["null", "boolean"] + }, + "campaignGuid": { + "type": ["null", "string"] + }, + "cloneable": { + "type": ["null", "boolean"] + }, + "editable": { + "type": ["null", "boolean"] + }, + "formType": { + "type": ["null", "string"] + }, + "metaData": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": ["null", "string"] + }, + "value": { + "type": ["null", "string"] + } + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/owners.json b/archive/tap_hubspot/schemas/owners.json new file mode 100644 index 0000000..2e3d61d --- /dev/null +++ b/archive/tap_hubspot/schemas/owners.json @@ -0,0 +1,72 @@ +{ + "type": "object", + "properties": { + "portalId": { + "type": ["null", "integer"] + }, + "ownerId": { + "type": ["null", "integer"] + }, + "type": { + "type": ["null", "string"] + }, + "firstName": { + "type": ["null", "string"] + }, + "lastName": { + "type": ["null", "string"] + }, + "email": { + "type": ["null", "string"] + }, + "createdAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "signature": { + "type": ["null", "string"] + }, + "updatedAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "hasContactsAccess" : { + "type": ["null", "boolean"] + }, + "isActive": { + "type": ["null", "boolean"] + }, + "activeUserId" : { + "type": ["null", "integer"] + }, + "userIdIncludingInactive" : { + "type": ["null", "integer"] + }, + "remoteList": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": ["null", "integer"] + }, + "portalId": { + "type": ["null", "integer"] + }, + "ownerId": { + "type": ["null", "integer"] + }, + "remoteId": { + "type": ["null", "string"] + }, + 
"remoteType": { + "type": ["null", "string"] + }, + "active": { + "type": ["null", "boolean"] + } + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/subscription_changes.json b/archive/tap_hubspot/schemas/subscription_changes.json new file mode 100644 index 0000000..1db687d --- /dev/null +++ b/archive/tap_hubspot/schemas/subscription_changes.json @@ -0,0 +1,54 @@ +{ + "type": "object", + "properties": { + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "portalId": { + "type": ["null", "integer"] + }, + "recipient": { + "type": ["null", "string"] + }, + "changes": { + "type": ["null", "array"], + "items": { + "type": ["null", "object"], + "properties": { + "change": { + "type": ["null", "string"] + }, + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "source": { + "type": ["null", "string"] + }, + "portalId": { + "type": ["null", "integer"] + }, + "subscriptionId": { + "type": ["null", "integer"] + }, + "changeType": { + "type": ["null", "string"] + }, + "causedByEvent": { + "type": ["null", "object"], + "properties": { + "id": { + "type": ["null", "string"] + }, + "created": { + "type": ["null", "string"], + "format": "date-time" + } + } + } + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/tickets.json b/archive/tap_hubspot/schemas/tickets.json new file mode 100644 index 0000000..264c567 --- /dev/null +++ b/archive/tap_hubspot/schemas/tickets.json @@ -0,0 +1,138 @@ +{ + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "createdAt": { + "type": [ + "null", + "string" + ], + "format": "date-time" + }, + "updatedAt": { + "type": [ + "null", + "string" + ], + "format": "date-time" + }, + "archived": { + "type": [ + "null", + "boolean" + ] + }, + "associations": { + "type": [ + "null", + "object" + ], + "properties": { + "companies": { + "type": [ + "null", + "object" + ], + "properties": { + "results": { + "type": [ + "null", + "array" + ], + "items": { + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "type": [ + "null", + "string" + ] + }, + "type": { + "type": [ + "null", + "string" + ] + } + } + } + } + } + }, + "deals": { + "type": [ + "null", + "object" + ], + "properties": { + "results": { + "type": [ + "null", + "array" + ], + "items": { + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "type": [ + "null", + "string" + ] + }, + "type": { + "type": [ + "null", + "string" + ] + } + } + } + } + } + }, + "contacts": { + "type": [ + "null", + "object" + ], + "properties": { + "results": { + "type": [ + "null", + "array" + ], + "items": { + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "type": [ + "null", + "string" + ] + }, + "type": { + "type": [ + "null", + "string" + ] + } + } + } + } + } + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/versions.json b/archive/tap_hubspot/schemas/versions.json new file mode 100644 index 0000000..f725655 --- /dev/null +++ b/archive/tap_hubspot/schemas/versions.json @@ -0,0 +1,30 @@ +{ + "type": "array", + "items": { + "type": ["null", "object"], + "properties": { + "name": { + "type": ["null", "string"] + }, + "value": { + "type": ["null", "string"] + }, + "timestamp": { + "type": ["null", "string"], + "format": "date-time" + }, + "source": { + "type": ["null", "string"] + }, + "sourceId": { + "type": ["null", "string"] + }, + "sourceVid": { + "type": ["null", "array"], + "items": { + "type": ["null", "string"] + } + } + } + } +} diff --git a/archive/tap_hubspot/schemas/workflows.json 
b/archive/tap_hubspot/schemas/workflows.json new file mode 100644 index 0000000..a72491a --- /dev/null +++ b/archive/tap_hubspot/schemas/workflows.json @@ -0,0 +1,48 @@ +{ + "type": "object", + "properties": { + "name": { + "type": ["null", "string"] + }, + "id": { + "type": ["null", "integer"] + }, + "type": { + "type": ["null", "string"] + }, + "enabled": { + "type": ["null", "boolean"] + }, + "insertedAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "updatedAt": { + "type": ["null", "string"], + "format": "date-time" + }, + "personaTagIds": { + "type": "array", + "items": { + "type": "integer" + } + }, + "contactListIds": { + "type": "object", + "properties": { + "enrolled": { + "type": ["null", "integer"] + }, + "active": { + "type": ["null", "integer"] + }, + "steps": { + "type": ["null", "array"], + "items": { + "type": ["null", "string"] + } + } + } + } + } +} diff --git a/archive/tap_hubspot/tests/__init__.py b/archive/tap_hubspot/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/archive/tap_hubspot/tests/test_bookmarks.py b/archive/tap_hubspot/tests/test_bookmarks.py new file mode 100644 index 0000000..cf01c4f --- /dev/null +++ b/archive/tap_hubspot/tests/test_bookmarks.py @@ -0,0 +1,62 @@ +import unittest +import singer.messages +import tap_hubspot +from tap_hubspot.tests import utils + +LOGGER = singer.get_logger() + +class Bookmarks(unittest.TestCase): + def setUp(self): + utils.verify_environment_vars() + utils.seed_tap_hubspot_config() + utils.write_to_singer() + + #NB> test account must have > 2 contacts for this to work + def sync_contacts(self): + STATE = utils.get_clear_state() + catalog = {'stream_alias': 'hubspot_contacts'} + + tap_hubspot.default_contact_params['count'] = 1 + + STATE = tap_hubspot.sync_contacts(STATE, catalog) + #offset has been cleared + self.assertEqual(utils.caught_state['bookmarks']['contacts']['offset'], {}) + + #some bookmark has been recorded in the state + self.assertNotEqual(utils.caught_state['bookmarks']['contacts']['lastmodifieddate'], None) + + #should sync some contacts + # LOGGER.info('A caught record: {}'.format(utils.caught_records['contacts'][0])) + self.assertGreater(len(utils.caught_records['contacts']), 1) + self.assertEqual(set(utils.caught_records.keys()), {'contacts'}) + self.assertEqual(utils.caught_pks, {'contacts': ['vid']}) + + utils.caught_records = [] + STATE = tap_hubspot.sync_contacts(STATE, catalog) + + #no new records thanks to bookmark + self.assertEqual(len(utils.caught_records), 0) + + def sync_companies(self): + STATE = utils.get_clear_state() + + catalog = {'stream_alias': 'hubspot_companies'} + STATE = tap_hubspot.sync_companies(STATE, catalog) + + #offset has been cleared + self.assertEqual(utils.caught_state['bookmarks']['companies']['offset'], {}) + + #some bookmark has been recorded in the state + self.assertNotEqual(utils.caught_state['bookmarks']['companies']['hs_lastmodifieddate'], None) + + #should sync some contacts && some hubspot_contacts_by_company + self.assertGreater(len(utils.caught_records), 0) + self.assertEqual(set(utils.caught_records.keys()), {'companies', 'hubspot_contacts_by_company'}) + + self.assertEqual(utils.caught_pks, {'companies': ['companyId'], 'hubspot_contacts_by_company': ['company-id', 'contact-id']}) + + utils.caught_records = [] + STATE = tap_hubspot.sync_companies(STATE, catalog) + + #no new records thanks to bookmark + self.assertEqual(len(utils.caught_records), 0) diff --git a/archive/tap_hubspot/tests/test_deals.py 
b/archive/tap_hubspot/tests/test_deals.py new file mode 100644 index 0000000..bc63d1d --- /dev/null +++ b/archive/tap_hubspot/tests/test_deals.py @@ -0,0 +1,34 @@ +from tap_hubspot import sync_deals +from unittest.mock import patch, ANY + + +@patch('builtins.min') +@patch('tap_hubspot.Context.get_catalog_from_id', return_value={"metadata": ""}) +@patch('singer.metadata.to_map', return_value={}) +@patch('singer.utils.strptime_with_tz') +@patch('singer.utils.strftime') +@patch('tap_hubspot.load_schema') +@patch('tap_hubspot.gen_request', return_value=[]) +def test_associations_are_not_validated(mocked_gen_request, mocked_catalog_from_id, mocked_metadata_map, mocked_utils_strptime, mocked_utils_strftime, mocked_load_schema, mocked_min): + # pylint: disable=unused-argument + sync_deals({}, mocked_catalog_from_id) + + expected_param = {'includeAssociations': False, 'properties': [], 'limit': 100} + + mocked_gen_request.assert_called_once_with(ANY, ANY, ANY, expected_param, ANY, ANY, ANY, ANY, v3_fields=None) + + +@patch('builtins.min') +@patch('tap_hubspot.Context.get_catalog_from_id', return_value={"metadata": ""}) +@patch('singer.metadata.to_map', return_value={"associations": {"selected": True}}) +@patch('singer.utils.strptime_with_tz') +@patch('singer.utils.strftime') +@patch('tap_hubspot.load_schema') +@patch('tap_hubspot.gen_request', return_value=[]) +def test_associations_are_validated(mocked_gen_request, mocked_catalog_from_id, mocked_metadata_map, mocked_utils_strptime, mocked_utils_strftime, mocked_load_schema, mocked_min): + # pylint: disable=unused-argument + sync_deals({}, mocked_catalog_from_id) + + expected_param = {'includeAssociations': True, 'properties': [], 'limit': 100} + + mocked_gen_request.assert_called_once_with(ANY, ANY, ANY, expected_param, ANY, ANY, ANY, ANY, v3_fields=None) diff --git a/archive/tap_hubspot/tests/test_get_streams_to_sync.py b/archive/tap_hubspot/tests/test_get_streams_to_sync.py new file mode 100644 index 0000000..394f190 --- /dev/null +++ b/archive/tap_hubspot/tests/test_get_streams_to_sync.py @@ -0,0 +1,44 @@ +import unittest +from tap_hubspot import get_streams_to_sync, parse_source_from_url, Stream + + +class TestGetStreamsToSync(unittest.TestCase): + + def setUp(self): + self.streams = [ + Stream('a', 'a', [], None, None), + Stream('b', 'b', [], None, None), + Stream('c', 'c', [], None, None), + ] + + def test_get_streams_to_sync_with_no_this_stream(self): + state = {'this_stream': None} + self.assertEqual(self.streams, get_streams_to_sync(self.streams, state)) + + def test_get_streams_to_sync_with_first_stream(self): + state = {'currently_syncing': 'a'} + + result = get_streams_to_sync(self.streams, state) + + parsed_result = [s.tap_stream_id for s in result] + self.assertEqual(parsed_result, ['a', 'b', 'c']) + + def test_get_streams_to_sync_with_middle_stream(self): + state = {'currently_syncing': 'b'} + + result = get_streams_to_sync(self.streams, state) + + parsed_result = [s.tap_stream_id for s in result] + self.assertEqual(parsed_result, ['b', 'c', 'a']) + + def test_get_streams_to_sync_with_last_stream(self): + state = {'currently_syncing': 'c'} + + result = get_streams_to_sync(self.streams, state) + + parsed_result = [s.tap_stream_id for s in result] + self.assertEqual(parsed_result, ['c', 'a', 'b']) + + def test_parse_source_from_url_succeeds(self): + url = "https://api.hubapi.com/companies/v2/companies/recent/modified" + self.assertEqual('companies', parse_source_from_url(url)) diff --git a/archive/tap_hubspot/tests/test_offsets.py 
b/archive/tap_hubspot/tests/test_offsets.py new file mode 100644 index 0000000..5b8c588 --- /dev/null +++ b/archive/tap_hubspot/tests/test_offsets.py @@ -0,0 +1,57 @@ +import unittest +import singer +import tap_hubspot +import singer.bookmarks +from tap_hubspot.tests import utils + +LOGGER = singer.get_logger() + +def set_offset_with_exception(state, tap_stream_id, offset_key, offset_value): + LOGGER.info("set_offset_with_exception: %s", utils.caught_state) + utils.caught_state = singer.bookmarks.set_offset(state, tap_stream_id, offset_key, offset_value) + raise Exception("simulated") + +class Offsets(unittest.TestCase): + def setUp(self): + utils.verify_environment_vars() + utils.seed_tap_hubspot_config() + utils.write_to_singer() + singer.set_offset = set_offset_with_exception + + #NB> test accounts must have > 1 companies for this to work + def sync_companies(self): + simulated_exception = None + STATE = utils.get_clear_state() + catalog = {'stream_alias': 'hubspot_companies'} + + #change count = 1 + tap_hubspot.default_company_params['limit'] = 1 + + try: + STATE = tap_hubspot.sync_companies(STATE, catalog) + except Exception as ex: + simulated_exception = ex + # logging.exception('strange') + + self.assertIsNot(simulated_exception, None) + + + self.assertEqual(set(utils.caught_records.keys()), {'companies', 'hubspot_contacts_by_company'}) + + #should only emit 1 company record because of the limit + self.assertEqual(len(utils.caught_records['companies']), 1) + self.assertGreater(len(utils.caught_records['hubspot_contacts_by_company']), 0) + + #offset should be set in state + LOGGER.info("utils.caught_state: %s", utils.caught_state) + self.assertNotEqual(utils.caught_state['bookmarks']['companies']['offset'], {}) + + #no bookmark though + self.assertEqual(utils.caught_state['bookmarks']['companies']['hs_lastmodifieddate'], None) + + #change count back to 250 + tap_hubspot.default_company_params['limit'] = 250 + + #call do_sync and verify: + # 1)sync_companies is called first + # 2)previous retrieved record is not retrieved again diff --git a/archive/tap_hubspot/tests/unittests/test_get_start.py b/archive/tap_hubspot/tests/unittests/test_get_start.py new file mode 100644 index 0000000..42fed6e --- /dev/null +++ b/archive/tap_hubspot/tests/unittests/test_get_start.py @@ -0,0 +1,94 @@ +import unittest +import tap_hubspot +from tap_hubspot import get_start +from tap_hubspot import singer + +def get_state(key,value): + """ + Returns a mock state + """ + return { + "bookmarks": { + "stream_id_1": { + "offset": {}, + key: value + } + } + } + +class TestGetStart(unittest.TestCase): + """ + Verify return value of `get_start` function. + """ + def test_get_start_without_state(self): + """ + This test verifies that `get_start` function returns start_date from CONFIG + if an empty state is passed. + """ + mock_state = {} + expected_value = tap_hubspot.CONFIG["start_date"] + returned_value = get_start(mock_state, "stream_id_1", "current_bookmark", "old_bookmark") + + # Verify that returned value is start_date + self.assertEqual(returned_value, expected_value) + + def test_get_start_with_old_bookmark(self): + """ + This test verifies that the `get_start` function returns old_bookmark from the state + if current_bookmark is not available in the state. 
+ """ + mock_state = get_state("old_bookmark", "OLD_BOOKMARK_VALUE") + expected_value = "OLD_BOOKMARK_VALUE" + + returned_value = get_start(mock_state, "stream_id_1", "current_bookmark", "old_bookmark") + + # Verify that returned value is old_bookmark_value + self.assertEqual(returned_value, expected_value) + + def test_get_start_with_current_bookmark_and_no_old_bookmark(self): + """ + This test verifies that the `get_start` function returns current_bookmark from the state + if current_bookmark is available in the state and old_bookmark is not given. + """ + mock_state = get_state("current_bookmark", "CURR_BOOKMARK_VALUE") + expected_value = "CURR_BOOKMARK_VALUE" + + returned_value = get_start(mock_state, "stream_id_1", "current_bookmark") + + # Verify that returned value is current bookmark + self.assertEqual(returned_value, expected_value) + + def test_get_start_with_empty_start__no_old_bookmark(self): + """ + This test verifies that the `get_start` function returns start_date from CONFIG + if an empty state is passed and old_bookamrk is not given. + """ + mock_state = {} + expected_value = tap_hubspot.CONFIG["start_date"] + + returned_value = get_start(mock_state, "stream_id_1", "current_bookmark") + + # Verify that returned value is start_date + self.assertEqual(returned_value, expected_value) + + def test_get_start_with_both_bookmark(self): + """ + This test verifies that the `get_start` function returns current_bookmark from the state + if both old and current bookmark is available in the state. + """ + + mock_state = { + "bookmarks": { + "stream_id_1": { + "offset": {}, + "old_bookmark": "OLD_BOOKMARK_VALUE", + "current_bookmark": "CURR_BOOKMARK_VALUE" + } + } + } + expected_value = "CURR_BOOKMARK_VALUE" + + returned_value = get_start(mock_state, "stream_id_1", "current_bookmark", "old_bookmark") + + # Verify that returned value is current bookmark + self.assertEqual(returned_value, expected_value) diff --git a/archive/tap_hubspot/tests/unittests/test_request_timeout.py b/archive/tap_hubspot/tests/unittests/test_request_timeout.py new file mode 100644 index 0000000..33d5456 --- /dev/null +++ b/archive/tap_hubspot/tests/unittests/test_request_timeout.py @@ -0,0 +1,121 @@ +import unittest +import requests +from unittest import mock +import tap_hubspot +class TestRequestTimeoutValue(unittest.TestCase): + + def test_integer_request_timeout_in_config(self): + """ + Verify that if request_timeout is provided in config(integer value) then it should be use + """ + tap_hubspot.CONFIG.update({"request_timeout": 100}) # integer timeout in config + + request_timeout = tap_hubspot.get_request_timeout() + + self.assertEqual(request_timeout, 100.0) # Verify timeout value + + def test_float_request_timeout_in_config(self): + """ + Verify that if request_timeout is provided in config(float value) then it should be use + """ + tap_hubspot.CONFIG.update({"request_timeout": 100.5}) # float timeout in config + + request_timeout = tap_hubspot.get_request_timeout() + + self.assertEqual(request_timeout, 100.5) # Verify timeout value + + def test_string_request_timeout_in_config(self): + """ + Verify that if request_timeout is provided in config(string value) then it should be use + """ + tap_hubspot.CONFIG.update({"request_timeout": "100"}) # string format timeout in config + + request_timeout = tap_hubspot.get_request_timeout() + + self.assertEqual(request_timeout, 100.0) # Verify timeout value + + def test_empty_string_request_timeout_in_config(self): + """ + Verify that if request_timeout is provided in 
+ """
+ tap_hubspot.CONFIG.update({"request_timeout": ""}) # empty string in config
+
+ request_timeout = tap_hubspot.get_request_timeout()
+
+ self.assertEqual(request_timeout, 300) # Verify timeout value
+
+ def test_zero_request_timeout_in_config(self):
+ """
+ Verify that if request_timeout is provided in config with a zero value then the default value is used
+ """
+ tap_hubspot.CONFIG.update({"request_timeout": 0}) # zero value in config
+
+ request_timeout = tap_hubspot.get_request_timeout()
+
+ self.assertEqual(request_timeout, 300) # Verify timeout value
+
+ def test_zero_string_request_timeout_in_config(self):
+ """
+ Verify that if request_timeout is provided in config with zero in string format then the default value is used
+ """
+ tap_hubspot.CONFIG.update({"request_timeout": '0'}) # zero value in config
+
+ request_timeout = tap_hubspot.get_request_timeout()
+
+ self.assertEqual(request_timeout, 300) # Verify timeout value
+
+ def test_no_request_timeout_in_config(self):
+ """
+ Verify that if request_timeout is not provided in config then the default value is used
+ """
+ tap_hubspot.CONFIG = {}
+ request_timeout = tap_hubspot.get_request_timeout()
+
+ self.assertEqual(request_timeout, 300) # Verify timeout value
+
+
+@mock.patch("time.sleep")
+class TestRequestTimeoutBackoff(unittest.TestCase):
+
+ @mock.patch('requests.Session.send', side_effect = requests.exceptions.Timeout)
+ @mock.patch("requests.Request.prepare")
+ @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {}))
+ def test_request_timeout_backoff(self, mocked_get, mocked_prepare, mocked_send, mocked_sleep):
+ """
+ Verify the request function backs off only 5 times on a Timeout exception.
+ """
+ try:
+ tap_hubspot.request('dummy_url', {})
+ except Exception:
+ pass
+
+ # Verify that Session.send is called 5 times
+ self.assertEqual(mocked_send.call_count, 5)
+
+ @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {}))
+ @mock.patch('requests.post', side_effect = requests.exceptions.Timeout)
+ def test_request_timeout_backoff_for_post_search_endpoint(self, mocked_post, mocked_get, mocked_sleep):
+ """
+ Verify the post_search_endpoint function backs off only 5 times on a Timeout exception.
+ """
+ try:
+ tap_hubspot.post_search_endpoint('dummy_url', {})
+ except Exception:
+ pass
+
+ # Verify that requests.post is called 5 times
+ self.assertEqual(mocked_post.call_count, 5)
+
+ @mock.patch('requests.post', side_effect = requests.exceptions.Timeout)
+ def test_request_timeout_backoff_for_acquire_access_token_from_refresh_token(self, mocked_post, mocked_sleep):
+ """
+ Verify the request function backs off only 5 times instead of 25 times on a Timeout exception thrown from the `acquire_access_token_from_refresh_token` method.
+ Here, get_params_and_headers is called from the request method, and acquire_access_token_from_refresh_token is called from get_params_and_headers.
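+ (Otherwise the two nested backoff decorators would retry multiplicatively: 5 attempts of the POST times 5 token-refresh attempts, i.e. 25.)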
+ """ + try: + tap_hubspot.post_search_endpoint('dummy_url', {}) + except Exception: + pass + + # Verify that requests.post is called 5 times + self.assertEqual(mocked_post.call_count, 5) diff --git a/archive/tap_hubspot/tests/unittests/test_tickets.py b/archive/tap_hubspot/tests/unittests/test_tickets.py new file mode 100644 index 0000000..38e3490 --- /dev/null +++ b/archive/tap_hubspot/tests/unittests/test_tickets.py @@ -0,0 +1,147 @@ +import unittest +from unittest.mock import patch + +from tap_hubspot import sync_tickets + +mock_response_data = { + "results": [{ + "updatedAt": "2022-08-18T12:57:17.587Z", + "createdAt": "2019-08-06T02:43:01.930Z", + "name": "hs_file_upload", + "label": "File upload", + "type": "string", + "fieldType": "file", + "description": "Files attached to a support form by a contact.", + "groupName": "ticketinformation", + "options": [], + "displayOrder": -1, + "calculated": False, + "externalOptions": False, + "hasUniqueValue": False, + "hidden": False, + "hubspotDefined": True, + "modificationMetadata": { + "archivable": True, + "readOnlyDefinition": True, + "readOnlyValue": False + }, + "formField": True + }] +} + + +class MockResponse: + + def __init__(self, json_data): + self.json_data = json_data + + def json(self): + return self.json_data + + +class MockContext: + def get_catalog_from_id(self, stream_name): + return { + "stream": "tickets", + "tap_stream_id": "tickets", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "updatedAt": { + "type": [ + "null", + "string" + ], + "format": "date-time" + }, + "properties": { + "type": "object", + "properties": { + "hs_all_team_ids": { + "type": [ + "null", + "string" + ] + } + } + }, + "property_hs_all_team_ids": { + "type": [ + "null", + "string" + ] + } + } + }, + "metadata": [{ + "breadcrumb": [], + "metadata": { + "table-key-properties": ["id"], + "forced-replication-method": "INCREMENTAL", + "valid-replication-keys": [ + "updatedAt" + ], + "selected": True + } + }, + { + "breadcrumb": ["properties", "id"], + "metadata": { + "inclusion": "automatic" + } + }, + + { + "breadcrumb": ["properties", "updatedAt"], + "metadata": { + "inclusion": "automatic" + } + }, + { + "breadcrumb": ["properties", "properties"], + "metadata": { + "inclusion": "available" + } + }, + + { + "breadcrumb": ["properties", "property_hs_all_team_ids"], + "metadata": { + "inclusion": "available", + "selected": True + } + } + ] + } + + +class TestTickets(unittest.TestCase): + + @patch('tap_hubspot.request', return_value=MockResponse(mock_response_data)) + @patch('tap_hubspot.get_start', return_value='2023-01-01T00:00:00Z') + @patch('tap_hubspot.gen_request_tickets') + def test_ticket_params_are_validated(self, mocked_gen_request, mocked_get_start, + mock_request_response): + """ + # Validating the parameters passed while making the API request to list the tickets + """ + mock_context = MockContext() + expected_param = {'limit': 100, + 'associations': 'contact,company,deals', + 'properties': 'hs_all_team_ids', + 'archived': False + } + expected_return_value = {'currently_syncing': 'tickets', 'bookmarks': { + 'tickets': {'updatedAt': '2023-01-01T00:00:00.000000Z'}}} + + return_value = sync_tickets({'currently_syncing': 'tickets'}, mock_context) + self.assertEqual( + expected_return_value, + return_value + ) + mocked_gen_request.assert_called_once_with('tickets', + 'https://api.hubapi.com/crm/v4/objects/tickets', + expected_param, 'results', 'paging') diff --git a/archive/tap_hubspot/tests/utils.py 
b/archive/tap_hubspot/tests/utils.py new file mode 100644 index 0000000..6c3e481 --- /dev/null +++ b/archive/tap_hubspot/tests/utils.py @@ -0,0 +1,80 @@ +import singer +import singer.bookmarks +import os +import tap_hubspot + +LOGGER = singer.get_logger() + +caught_records = {} +caught_bookmarks = [] +caught_state = {} +caught_schema = {} +caught_pks = {} + + +def verify_environment_vars(): + missing_envs = [x for x in [os.getenv('TAP_HUBSPOT_REDIRECT_URI'), + os.getenv('TAP_HUBSPOT_CLIENT_ID'), + os.getenv('TAP_HUBSPOT_CLIENT_SECRET'), + os.getenv('TAP_HUBSPOT_REFRESH_TOKEN')] if x is None] + if len(missing_envs) != 0: + #pylint: disable=line-too-long + raise Exception("set TAP_HUBSPOT_REDIRECT_URI, TAP_HUBSPOT_CLIENT_ID, TAP_HUBSPOT_CLIENT_SECRET, TAP_HUBSPOT_REFRESH_TOKEN") + +def seed_tap_hubspot_config(): + tap_hubspot.CONFIG = { + "access_token": None, + "token_expires": None, + + "redirect_uri": os.environ['TAP_HUBSPOT_REDIRECT_URI'], + "client_id": os.environ['TAP_HUBSPOT_CLIENT_ID'], + "client_secret": os.environ['TAP_HUBSPOT_CLIENT_SECRET'], + "refresh_token": os.environ['TAP_HUBSPOT_REFRESH_TOKEN'], + "start_date": "2001-01-01T00:00:00Z" + } + +def get_clear_state(): + return { + "bookmarks": { + "contacts": { + "offset": {}, + "lastmodifieddate": None + }, + "companies": { + "offset": {}, + "hs_lastmodifieddate": None + } + + }, + "currently_syncing": None + } + + +#pylint: disable=line-too-long +def our_write_bookmark(state, table_name, bookmark_key, bookmark_value): + caught_bookmarks.append([bookmark_key, bookmark_value]) + state = singer.bookmarks.write_bookmark(state, table_name, bookmark_key, bookmark_value) + return state + +def our_write_schema(table_name, schema, pks): + caught_pks[table_name] = pks + caught_schema[table_name] = schema + +def our_write_state(state): + # pylint: disable=global-statement + LOGGER.info("our_write_state: %s", state) + global caught_state + caught_state = state + return state + +def our_write_record(table_name, record): + if caught_records.get(table_name) is None: + caught_records[table_name] = [] + + caught_records[table_name].append(record) + +def write_to_singer(): + singer.write_bookmark = our_write_bookmark + singer.write_state = our_write_state + singer.write_record = our_write_record + singer.write_schema = our_write_schema diff --git a/archive/tests/base.py b/archive/tests/base.py new file mode 100644 index 0000000..769eac8 --- /dev/null +++ b/archive/tests/base.py @@ -0,0 +1,390 @@ +import os +import unittest +from datetime import datetime as dt +from datetime import timedelta + +import tap_tester.menagerie as menagerie +import tap_tester.connections as connections +import tap_tester.runner as runner +from tap_tester.base_case import BaseCase +from tap_tester import LOGGER + + +class HubspotBaseTest(BaseCase): + + REPLICATION_KEYS = "valid-replication-keys" + PRIMARY_KEYS = "table-key-properties" + FOREIGN_KEYS = "table-foreign-key-properties" + REPLICATION_METHOD = "forced-replication-method" + INCREMENTAL = "INCREMENTAL" + FULL = "FULL_TABLE" + + START_DATE_FORMAT = "%Y-%m-%dT00:00:00Z" # %H:%M:%SZ + BASIC_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" + + EXPECTED_PAGE_SIZE = "expected-page-size" + OBEYS_START_DATE = "obey-start-date" + PARENT_STREAM = "parent-stream" + + ####################################### + # Tap Configurable Metadata Methods # + ####################################### + + def setUp(self): + missing_envs = [x for x in [ + 'TAP_HUBSPOT_REDIRECT_URI', + 'TAP_HUBSPOT_CLIENT_ID', + 'TAP_HUBSPOT_CLIENT_SECRET', + 
'TAP_HUBSPOT_REFRESH_TOKEN' + ] if os.getenv(x) is None] + if missing_envs: + raise Exception("Missing environment variables: {}".format(missing_envs)) + + @staticmethod + def get_type(): + return "platform.hubspot" + + @staticmethod + def tap_name(): + return "tap-hubspot" + + def get_properties(self): + start_date = dt.today() - timedelta(days=1) + start_date_with_fmt = dt.strftime(start_date, self.START_DATE_FORMAT) + + return {'start_date' : start_date_with_fmt} + + def get_credentials(self): + return {'refresh_token': os.getenv('TAP_HUBSPOT_REFRESH_TOKEN'), + 'client_secret': os.getenv('TAP_HUBSPOT_CLIENT_SECRET'), + 'redirect_uri': os.getenv('TAP_HUBSPOT_REDIRECT_URI'), + 'client_id': os.getenv('TAP_HUBSPOT_CLIENT_ID')} + + def expected_check_streams(self): + return set(self.expected_metadata().keys()) + + def expected_metadata(self): # DOCS_BUG https://stitchdata.atlassian.net/browse/DOC-1523) + """The expected streams and metadata about the streams""" + return { + "campaigns": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.FULL, + self.OBEYS_START_DATE: False + }, + "companies": { + self.PRIMARY_KEYS: {"companyId"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"property_hs_lastmodifieddate"}, + self.EXPECTED_PAGE_SIZE: 250, + self.OBEYS_START_DATE: True + }, + "contact_lists": { + self.PRIMARY_KEYS: {"listId"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updatedAt"}, + self.EXPECTED_PAGE_SIZE: 250, + self.OBEYS_START_DATE: True + }, + "contacts": { + self.PRIMARY_KEYS: {"vid"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"versionTimestamp"}, + self.EXPECTED_PAGE_SIZE: 100, + self.OBEYS_START_DATE: True + }, + "contacts_by_company": { + self.PRIMARY_KEYS: {"company-id", "contact-id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.EXPECTED_PAGE_SIZE: 100, + self.OBEYS_START_DATE: True, + self.PARENT_STREAM: 'companies' + }, + "deal_pipelines": { + self.PRIMARY_KEYS: {"pipelineId"}, + self.REPLICATION_METHOD: self.FULL, + self.OBEYS_START_DATE: False, + }, + "deals": { + self.PRIMARY_KEYS: {"dealId"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"property_hs_lastmodifieddate"}, + self.OBEYS_START_DATE: True + }, + "email_events": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"startTimestamp"}, + self.EXPECTED_PAGE_SIZE: 1000, + self.OBEYS_START_DATE: True + }, + "engagements": { + self.PRIMARY_KEYS: {"engagement_id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"lastUpdated"}, + self.EXPECTED_PAGE_SIZE: 250, + self.OBEYS_START_DATE: True + }, + "forms": { + self.PRIMARY_KEYS: {"guid"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updatedAt"}, + self.OBEYS_START_DATE: True + }, + "owners": { + self.PRIMARY_KEYS: {"ownerId"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updatedAt"}, + self.OBEYS_START_DATE: True # TODO is this a BUG? 
+ }, + "subscription_changes": { + self.PRIMARY_KEYS: {"timestamp", "portalId", "recipient"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"startTimestamp"}, + self.EXPECTED_PAGE_SIZE: 1000, + self.OBEYS_START_DATE: True + }, + "workflows": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updatedAt"}, + self.OBEYS_START_DATE: True + }, + "tickets": { + self.PRIMARY_KEYS: {"id"}, + self.REPLICATION_METHOD: self.INCREMENTAL, + self.REPLICATION_KEYS: {"updatedAt"}, + self.EXPECTED_PAGE_SIZE: 100, + self.OBEYS_START_DATE: True + } + } + + ############################# + # Common Metadata Methods # + ############################# + + def expected_primary_keys(self): + """ + return a dictionary with key of table name + and value as a set of primary key fields + """ + return {table: properties.get(self.PRIMARY_KEYS, set()) + for table, properties + in self.expected_metadata().items()} + + + def expected_automatic_fields(self): + """ + return a dictionary with key of table name and value as the primary keys and replication keys + """ + pks = self.expected_primary_keys() + rks = self.expected_replication_keys() + + return {stream: rks.get(stream, set()) | pks.get(stream, set()) + for stream in self.expected_streams()} + + + def expected_replication_method(self): + """return a dictionary with key of table name and value of replication method""" + return {table: properties.get(self.REPLICATION_METHOD, None) + for table, properties + in self.expected_metadata().items()} + + def expected_streams(self): + """A set of expected stream names""" + return set(self.expected_metadata().keys()) + + def expected_replication_keys(self): + """ + return a dictionary with key of table name + and value as a set of replication key fields + """ + return {table: properties.get(self.REPLICATION_KEYS, set()) + for table, properties + in self.expected_metadata().items()} + + def expected_page_limits(self): + return {table: properties.get(self.EXPECTED_PAGE_SIZE, set()) + for table, properties + in self.expected_metadata().items()} + + def expected_primary_keys(self): + + """ + return a dictionary with key of table name + and value as a set of primary key fields + """ + return {table: properties.get(self.PRIMARY_KEYS, set()) + for table, properties + in self.expected_metadata().items()} + + def expected_automatic_fields(self): + auto_fields = {} + for k, v in self.expected_metadata().items(): + auto_fields[k] = v.get(self.PRIMARY_KEYS, set()) | v.get(self.REPLICATION_KEYS, set()) + return auto_fields + + ########################## + # Common Test Actions # + ########################## + + def create_connection_and_run_check(self, original_properties: bool = True): + """Create a new connection with the test name""" + # Create the connection + conn_id = connections.ensure_connection(self, original_properties) + + # Run a check job using orchestrator (discovery) + check_job_name = runner.run_check_mode(self, conn_id) + + # Assert that the check job succeeded + exit_status = menagerie.get_exit_status(conn_id, check_job_name) + menagerie.verify_check_exit_status(self, exit_status, check_job_name) + return conn_id + + def run_and_verify_check_mode(self, conn_id): + """ + Run the tap in check mode and verify it succeeds. + This should be ran prior to field selection and initial sync. + + Return the connection id and found catalogs from menagerie. 
+ """ + # run in check mode + check_job_name = runner.run_check_mode(self, conn_id) + + # verify check exit codes + exit_status = menagerie.get_exit_status(conn_id, check_job_name) + menagerie.verify_check_exit_status(self, exit_status, check_job_name) + + found_catalogs = menagerie.get_catalogs(conn_id) + self.assertGreater(len(found_catalogs), 0, msg="unable to locate schemas for connection {}".format(conn_id)) + + found_catalog_names = set(map(lambda c: c['tap_stream_id'], found_catalogs)) + self.assertSetEqual(self.expected_check_streams(), found_catalog_names, + msg="discovered schemas do not match") + LOGGER.info("discovered schemas are OK") + + return found_catalogs + + def run_and_verify_sync(self, conn_id): + """ + Run a sync job and make sure it exited properly. + Return a dictionary with keys of streams synced + and values of records synced for each stream + """ + # Run a sync job using orchestrator + sync_job_name = runner.run_sync_mode(self, conn_id) + + # Verify tap and target exit codes + exit_status = menagerie.get_exit_status(conn_id, sync_job_name) + menagerie.verify_sync_exit_status(self, exit_status, sync_job_name) + + # Verify actual rows were synced + sync_record_count = runner.examine_target_output_file(self, + conn_id, + self.expected_streams(), + self.expected_primary_keys()) + total_row_count = sum(sync_record_count.values()) + self.assertGreater(total_row_count, 0, + msg="failed to replicate any data: {}".format(sync_record_count)) + LOGGER.info("total replicated row count: %s", total_row_count) + + return sync_record_count + + def perform_and_verify_table_and_field_selection(self, + conn_id, + test_catalogs, + select_all_fields=True): + """ + Perform table and field selection based off of the streams to select + set and field selection parameters. + + Verify this results in the expected streams selected and all or no + fields selected for those streams. 
+ """ + + # Select all available fields or select no fields from all testable streams + self.select_all_streams_and_fields( + conn_id=conn_id, catalogs=test_catalogs, select_all_fields=select_all_fields + ) + + catalogs = menagerie.get_catalogs(conn_id) + + # Ensure our selection affects the catalog + expected_selected = [tc.get('tap_stream_id') for tc in test_catalogs] + for cat in catalogs: + catalog_entry = menagerie.get_annotated_schema(conn_id, cat['stream_id']) + + # Verify all testable streams are selected + selected = catalog_entry.get('annotated-schema').get('selected') + LOGGER.info("Validating selection on %s: %s", cat['stream_name'], selected) + if cat['stream_name'] not in expected_selected: + self.assertFalse(selected, msg="Stream selected, but not testable.") + continue # Skip remaining assertions if we aren't selecting this stream + self.assertTrue(selected, msg="Stream not selected.") + + if select_all_fields: + # Verify all fields within each selected stream are selected + for field, field_props in catalog_entry.get('annotated-schema').get('properties').items(): + field_selected = field_props.get('selected') + LOGGER.info("\tValidating selection on %s.%s: %s", + cat['stream_name'], field, field_selected) + self.assertTrue(field_selected, msg="Field not selected.") + else: + # Verify only automatic fields are selected + expected_automatic_fields = self.expected_automatic_fields().get(cat['tap_stream_id']) + selected_fields = self.get_selected_fields_from_metadata(catalog_entry['metadata']) + self.assertEqual(expected_automatic_fields, selected_fields) + + @staticmethod + def get_selected_fields_from_metadata(metadata): + selected_fields = set() + for field in metadata: + is_field_metadata = len(field['breadcrumb']) > 1 + inclusion_automatic_or_selected = (field['metadata'].get('inclusion') == 'automatic' + or field['metadata'].get('selected') is True) + if is_field_metadata and inclusion_automatic_or_selected: + selected_fields.add(field['breadcrumb'][1]) + return selected_fields + + @staticmethod + def select_all_streams_and_fields(conn_id, catalogs, select_all_fields: bool = True): + """Select all streams and all fields within streams""" + for catalog in catalogs: + schema = menagerie.get_annotated_schema(conn_id, catalog['stream_id']) + + non_selected_properties = [] + if not select_all_fields: + # get a list of all properties so that none are selected + non_selected_properties = schema.get('annotated-schema', {}).get( + 'properties', {}).keys() + + connections.select_catalog_and_fields_via_metadata( + conn_id, catalog, schema, [], non_selected_properties) + + def timedelta_formatted(self, dtime, days=0, str_format="%Y-%m-%dT00:00:00Z"): + date_stripped = dt.strptime(dtime, str_format) + return_date = date_stripped + timedelta(days=days) + + return dt.strftime(return_date, str_format) + + ################################ + # Tap Specific Test Actions # + ################################ + + def datetime_from_timestamp(self, value, str_format="%Y-%m-%dT00:00:00Z"): + """ + Takes in a unix timestamp in milliseconds. + Returns a string formatted python datetime + """ + try: + datetime_value = dt.fromtimestamp(value) + datetime_str = dt.strftime(datetime_value, str_format) + except ValueError as err: + raise NotImplementedError( + f"Invalid argument 'value': {value} " + "This method was designed to accept unix timestamps in milliseconds." 
+ )
+ return datetime_str
+
+ def is_child(self, stream):
+ """return true if this stream is a child stream"""
+ return self.expected_metadata()[stream].get(self.PARENT_STREAM) is not None
diff --git a/archive/tests/client.py b/archive/tests/client.py
new file mode 100644
index 0000000..923a191
--- /dev/null
+++ b/archive/tests/client.py
@@ -0,0 +1,1679 @@
+import datetime
+import random
+import uuid
+
+import backoff
+import requests
+from base import HubspotBaseTest
+from tap_tester import LOGGER
+
+DEBUG = False
+BASE_URL = "https://api.hubapi.com"
+
+
+class TestClient():
+ START_DATE_FORMAT = "%Y-%m-%dT00:00:00Z"
+ V3_DEALS_PROPERTY_PREFIXES = {'hs_date_entered', 'hs_date_exited', 'hs_time_in'}
+ BOOKMARK_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+
+ ##########################################################################
+ ### CORE METHODS
+ ##########################################################################
+
+ def giveup(exc):
+ """Checks a response status code, returns True (give up) for 4xx errors, except 429 (rate limited), which is retried."""
+ if exc.response.status_code == 429:
+ return False
+
+ return exc.response is not None \
+ and 400 <= exc.response.status_code < 500
+
+ @backoff.on_exception(backoff.constant,
+ (requests.exceptions.RequestException,
+ requests.exceptions.HTTPError),
+ max_tries=5,
+ jitter=None,
+ giveup=giveup,
+ interval=10)
+ def get(self, url, params=dict()):
+ """Perform a GET using the standard requests method and log the action"""
+ response = requests.get(url, params=params, headers=self.HEADERS)
+ LOGGER.info(f"TEST CLIENT | GET {url} params={params} STATUS: {response.status_code}")
+ response.raise_for_status()
+ json_response = response.json()
+
+ return json_response
+
+ @backoff.on_exception(backoff.constant,
+ (requests.exceptions.RequestException,
+ requests.exceptions.HTTPError),
+ max_tries=5,
+ jitter=None,
+ giveup=giveup,
+ interval=10)
+ def post(self, url, data=dict(), params=dict(), debug=DEBUG):
+ """Perform a POST using the standard requests method and log the action"""
+
+ headers = dict(self.HEADERS)
+ headers['content-type'] = "application/json"
+ response = requests.post(url, json=data, params=params, headers=headers)
+ LOGGER.info(
+ f"TEST CLIENT | POST {url} data={data} params={params} STATUS: {response.status_code}")
+ if debug:
+ LOGGER.debug(response.text)
+
+ response.raise_for_status()
+
+ if response.status_code == 204:
+ LOGGER.warn(f"TEST CLIENT Response is empty")
+ # NB: There is a simplejson.scanner.JSONDecodeError thrown when we attempt
+ # to do a response.json() on a 204 response. To get around this we just return an empty list
+ # as we assume that a 204 will not have a body. A better implementation would be to catch the
+ # decode error; however, we were not able to get that approach working.
+ return []
+
+ json_response = response.json()
+ return json_response
+
+ @backoff.on_exception(backoff.constant,
+ (requests.exceptions.RequestException,
+ requests.exceptions.HTTPError),
+ max_tries=5,
+ jitter=None,
+ giveup=giveup,
+ interval=10)
+ def put(self, url, data, params=dict(), debug=DEBUG):
+ """Perform a PUT using the standard requests method and log the action"""
+ headers = dict(self.HEADERS)
+ headers['content-type'] = "application/json"
+ response = requests.put(url, json=data, params=params, headers=headers)
+ LOGGER.info(
+ f"TEST CLIENT | PUT {url} data={data} params={params} STATUS: {response.status_code}")
+ if debug:
+ LOGGER.debug(response.text)
+
+ response.raise_for_status()
+
+ @backoff.on_exception(backoff.constant,
+ (requests.exceptions.RequestException,
+ requests.exceptions.HTTPError),
+ max_tries=5,
+ jitter=None,
+ giveup=giveup,
+ interval=10)
+ def patch(self, url, data, params=dict(), debug=DEBUG):
+ """Perform a PATCH using the standard requests method and log the action"""
+ headers = dict(self.HEADERS)
+ headers['content-type'] = "application/json"
+ response = requests.patch(url, json=data, params=params, headers=headers)
+ LOGGER.info(
+ f"TEST CLIENT | PATCH {url} data={data} params={params} STATUS: {response.status_code}")
+ if debug:
+ LOGGER.debug(response.text)
+
+ response.raise_for_status()
+
+ @backoff.on_exception(backoff.constant,
+ (requests.exceptions.RequestException,
+ requests.exceptions.HTTPError),
+ max_tries=5,
+ jitter=None,
+ giveup=giveup,
+ interval=10)
+ def delete(self, url, params=dict(), debug=DEBUG):
+ """Perform a DELETE using the standard requests method and log the action"""
+
+ headers = dict(self.HEADERS)
+ headers['content-type'] = "application/json"
+ response = requests.delete(url, params=params, headers=headers)
+ LOGGER.info(f"TEST CLIENT | DELETE {url} params={params} STATUS: {response.status_code}")
+ if debug:
+ LOGGER.debug(response.text)
+ response.raise_for_status()
+
+ def denest_properties(self, stream, records):
+ """
+ Takes a list of records and checks each for a 'properties' key to denest.
+ Returns the list of denested records.
+ """
+ for record in records:
+ if record.get('properties'):
+ for property_key, property_value in record['properties'].items():
+
+ if isinstance(property_value, dict):
+ # if any property has a versions object track it by the top level key 'properties_versions'
+ if property_value.get('versions'):
+ if not record.get('properties_versions'):
+ record['properties_versions'] = []
+ record['properties_versions'] += property_value['versions']
+
+ # denest each property to be a top level key
+ record[f'property_{property_key}'] = property_value
+
+ LOGGER.info(f"TEST CLIENT | Transforming (denesting) {len(records)} {stream} records")
+ return records
+
+ def datatype_transformations(self, stream, records):
+ """
+ Takes a list of records and converts known epoch-millisecond datetime columns to formatted datetime strings.
+ Returns the list of transformed records.
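+ For example, the owners stream's 'createdAt'/'updatedAt' values are converted to BOOKMARK_DATE_FORMAT strings.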
+ """ + datetime_columns = { + 'owners': {'createdAt', 'updatedAt'}, + } + if stream in datetime_columns.keys(): + for record in records: + for column in record.keys(): + if column in datetime_columns[stream]: + record[column] = self.BaseTest.datetime_from_timestamp( + record[column] / 1000, self.BOOKMARK_DATE_FORMAT + ) + + LOGGER.info( + f"TEST CLIENT | Transforming (datatype conversions) {len(records)} {stream} records") + return records + + ########################################################################## + ### GET + ########################################################################## + def read(self, stream, parent_ids=[], since=''): + + # Resets the access_token if the expiry time is less than or equal to the current time + if self.CONFIG["token_expires"] <= datetime.datetime.utcnow(): + self.acquire_access_token_from_refresh_token() + + if stream == 'forms': + return self.get_forms() + elif stream == 'owners': + return self.get_owners() + elif stream == 'companies': + return self.get_companies(since) + elif stream == 'contact_lists': + return self.get_contact_lists(since) + elif stream == 'contacts_by_company': + return self.get_contacts_by_company(parent_ids) + elif stream == 'engagements': + return self.get_engagements() + elif stream == 'campaigns': + return self.get_campaigns() + elif stream == 'deals': + return self.get_deals() + elif stream == 'workflows': + return self.get_workflows() + elif stream == 'contacts': + return self.get_contacts() + elif stream == 'deal_pipelines': + return self.get_deal_pipelines() + elif stream == 'email_events': + return self.get_email_events() + elif stream == 'subscription_changes': + return self.get_subscription_changes(since) + elif stream == "tickets": + return self.get_tickets() + else: + raise NotImplementedError + + def get_campaigns(self): + """ + Get all campaigns by id, then grab the details of each campaign. + """ + campaign_by_id_url = f"{BASE_URL}/email/public/v1/campaigns/by-id" + campaign_url = f"{BASE_URL}/email/public/v1/campaigns/" + + # get all campaigns by-id + response = self.get(campaign_by_id_url) + campaign_ids = [campaign['id'] for campaign in response['campaigns']] + + # get the detailed record corresponding to each campagin-id + records = [] + for campaign_id in campaign_ids: + url = f"{campaign_url}{campaign_id}" + response = self.get(url) + records.append(response) + + return records + + def _get_company_by_id(self, company_id): + url = f"{BASE_URL}/companies/v2/companies/{company_id}" + response = self.get(url) + return response + + def get_companies(self, since=''): + """ + Get all companies by paginating using 'hasMore' and 'offset'. 
+ """ + url = f"{BASE_URL}/companies/v2/companies/paged" + if not since: + since = self.start_date_strf + + if not isinstance(since, datetime.datetime): + since = datetime.datetime.strptime(since, self.START_DATE_FORMAT) + params = {'properties': ["createdate", "hs_lastmodifieddate"]} + records = [] + + # paginating through all the companies + companies = [] + has_more = True + while has_more: + + response = self.get(url, params=params) + + for company in response['companies']: + if company['properties']['hs_lastmodifieddate']: + company_timestamp = datetime.datetime.fromtimestamp( + company['properties']['hs_lastmodifieddate']['timestamp'] / 1000 + ) + else: + company_timestamp = datetime.datetime.fromtimestamp( + company['properties']['createdate']['timestamp'] / 1000 + ) + + if company_timestamp >= since: + companies.append(company) + + has_more = response['has-more'] + params['offset'] = response['offset'] + + # get the details of each company + for company in companies: + response = self._get_company_by_id(company['companyId']) + records.append(response) + + records = self.denest_properties('companies', records) + + return records + + def get_contact_lists(self, since='', list_id=''): + """ + Get all contact_lists by paginating using 'has-more' and 'offset'. + """ + url = f"{BASE_URL}/contacts/v1/lists" + + if list_id: + url += f"/{list_id}" + response = self.get(url) + + return response + + if since == 'all': + params = {'count': 250} + else: + if not since: + since = self.start_date_strf + + if not isinstance(since, datetime.datetime): + since = datetime.datetime.strptime(since, self.START_DATE_FORMAT) + + since = str(since.timestamp() * 1000).split(".")[0] + params = {'since': since, 'count': 250} + + records = [] + replication_key = list(self.replication_keys['contact_lists'])[0] + + # paginating through allxo the contact_lists + has_more = True + while has_more: + + response = self.get(url, params=params) + for record in response['lists']: + + if since == 'all' or int(since) <= record[replication_key]: + records.append(record) + + has_more = response['has-more'] + params['offset'] = response['offset'] + + return records + + def _get_contacts_by_pks(self, pks): + """ + Get a specific contact by using the primary key value. + + :params pks: vids + :return: the contacts record + """ + url_2 = f"{BASE_URL}/contacts/v1/contact/vids/batch/" + params_2 = { + 'showListMemberships': True, + 'formSubmissionMode': "all", + } + records = [] + # get the detailed contacts records by vids + params_2['vid'] = pks + response_2 = self.get(url_2, params=params_2) + for vid, record in response_2.items(): + ts_ms = int(record['properties']['lastmodifieddate']['value']) / 1000 + converted_ts = self.BaseTest.datetime_from_timestamp( + ts_ms, self.BOOKMARK_DATE_FORMAT + ) + record['versionTimestamp'] = converted_ts + + records.append(record) + + records = self.denest_properties('contacts', records) + + return records[0] + + def get_contacts(self): + """ + Get all contact vids by paginating using 'has-more' and 'vid-offset/vidOffset'. + Then use the vids to grab the detailed contacts records. 
+ """ + url_1 = f"{BASE_URL}/contacts/v1/lists/all/contacts/all" + params_1 = { + 'showListMemberships': True, + 'includeVersion': True, + 'count': 100, + } + vids = [] + url_2 = f"{BASE_URL}/contacts/v1/contact/vids/batch/" + params_2 = { + 'showListMemberships': True, + 'formSubmissionMode': "all", + } + records = [] + + has_more = True + while has_more: + # get a page worth of contacts and pull the vids + response_1 = self.get(url_1, params=params_1) + vids = [record['vid'] for record in response_1['contacts'] + if record['versionTimestamp'] >= self.start_date] + has_more = response_1['has-more'] + params_1['vidOffset'] = response_1['vid-offset'] + + # get the detailed contacts records by vids + params_2['vid'] = vids + response_2 = self.get(url_2, params=params_2) + records.extend([record for record in response_2.values()]) + + records = self.denest_properties('contacts', records) + return records + + def get_contacts_by_company(self, parent_ids): + """ + Get all contacts_by_company iterating over compnayId's and + paginating using 'hasMore' and 'vidOffset'. This stream is essentially + a join on contacts and companies. + NB: This stream is a CHILD of 'companies'. If any test needs to pull expected + data from this endpoint, it requires getting all 'companies' data and then + pulling the 'companyId' from each record to perform the corresponding get here. + """ + + url = f"{BASE_URL}/companies/v2/companies/{{}}/vids" + params = dict() + records = [] + + for parent_id in parent_ids: + child_url = url.format(parent_id) + has_more = True + while has_more: + + response = self.get(child_url, params=params) + for vid in response.get('vids', {}): + records.extend([{'company-id': parent_id, + 'contact-id': vid}]) + + has_more = response['hasMore'] + params['vidOffset'] = response['vidOffset'] + + params = dict() + + return records + + def get_deal_pipelines(self): + """ + Get all deal_pipelines. + """ + url = f"{BASE_URL}/deals/v1/pipelines" + records = [] + + response = self.get(url) + records.extend(response) + + records = self.denest_properties('deal_pipelines', records) + return records + + def _get_deals_by_pk(self, deal_id): + url = f"{BASE_URL}/deals/v1/deal/{deal_id}" + params = {'includeAllProperties': True} + response = self.get(url, params=params) + + return response + + def get_deals(self): + """ + Get all deals from the v1 endpoiint by paginating using 'hasMore' and 'offset'. + For each deals record denest 'properties' so that they are prefxed with 'property_' + and located at the top level. + """ + v1_url = f"{BASE_URL}/deals/v1/deal/paged" + + v1_params = {'includeAllProperties': True, + 'allPropertiesFetchMode': 'latest_version', + 'properties': []} + replication_key = list(self.replication_keys['deals'])[0] + records = [] + + # hit the v1 endpoint to get the record + has_more = True + while has_more: + response = self.get(v1_url, params=v1_params) + records.extend([record for record in response['deals'] + # Here replication key of the deals stream is derived from "hs_lastmodifieddate" field. 
+        """
+        v1_url = f"{BASE_URL}/deals/v1/deal/paged"
+
+        v1_params = {'includeAllProperties': True,
+                     'allPropertiesFetchMode': 'latest_version',
+                     'properties': []}
+        replication_key = list(self.replication_keys['deals'])[0]
+        records = []
+
+        # hit the v1 endpoint to get the records
+        has_more = True
+        while has_more:
+            response = self.get(v1_url, params=v1_params)
+            # the replication key of the deals stream is derived from the "hs_lastmodifieddate" field
+            records.extend([record for record in response['deals']
+                            if record['properties']["hs_lastmodifieddate"][
+                                'timestamp'] >= self.start_date])
+            has_more = response['hasMore']
+            v1_params['offset'] = response['offset']
+
+        # batch the v1 response ids into groups of 100
+        v1_ids = [{'id': str(record['dealId'])} for record in records]
+        batches = []
+        batch_size = 100
+        for i in range(0, len(v1_ids), batch_size):
+            batches.append(v1_ids[i:i + batch_size])
+
+        # hit the v3 endpoint to get the special hs_ fields from v3 'properties'
+        v3_url = f"{BASE_URL}/crm/v3/objects/deals/batch/read"
+        v3_property = ['hs_date_entered_appointmentscheduled']
+        v3_records = []
+        for batch in batches:
+            data = {'inputs': batch,
+                    'properties': v3_property}
+            v3_response = self.post(v3_url, data)
+            v3_records += v3_response['results']
+
+        # pull the desired properties from the v3 records and add them to the corresponding v1 records
+        for v3_record in v3_records:
+            for record in records:
+                if v3_record['id'] == str(record['dealId']):
+                    # don't include the v3 property if the value is None
+                    non_null_v3_properties = {v3_property_key: v3_property_value
+                                              for v3_property_key, v3_property_value in
+                                              v3_record['properties'].items()
+                                              if v3_property_value is not None}
+
+                    # only grab v3 properties with a specific prefix
+                    trimmed_v3_properties = {v3_property_key: v3_property_value
+                                             for v3_property_key, v3_property_value in
+                                             non_null_v3_properties.items()
+                                             if any([v3_property_key.startswith(prefix)
+                                                     for prefix in
+                                                     self.V3_DEALS_PROPERTY_PREFIXES])}
+
+                    # the v3 properties must be restructured into objects to match v1
+                    v3_properties = {v3_property_key: {'value': v3_property_value}
+                                     for v3_property_key, v3_property_value in
+                                     trimmed_v3_properties.items()}
+
+                    # add the v3 record properties to the v1 record
+                    record['properties'].update(v3_properties)
+
+        records = self.denest_properties('deals', records)
+        return records
+
+    def get_email_events(self, recipient=''):
+        """
+        Get all email_events by paginating using 'hasMore' and 'offset'.
+        """
+        url = f"{BASE_URL}/email/public/v1/events"
+        replication_key = list(self.replication_keys['email_events'])[0]
+        params = dict()
+        if recipient:
+            params['recipient'] = recipient
+        records = []
+
+        has_more = True
+        while has_more:
+            response = self.get(url, params=params)
+
+            records.extend([record for record in response['events']
+                            if record['created'] >= self.start_date])
+
+            has_more = response['hasMore']
+            params['offset'] = response['offset']
+
+        return records
+
+    def _get_engagements_by_pk(self, engagement_id):
+        """
+        Get a specific engagement record using its id.
+        """
+        url = f"{BASE_URL}/engagements/v1/engagements/{engagement_id}"
+
+        response = self.get(url)
+
+        # added by tap
+        response['engagement_id'] = response['engagement']['id']
+        response['lastUpdated'] = response['engagement']['lastUpdated']
+
+        return response
+
+    def get_engagements(self):
+        """
+        Get all engagements by paginating using 'hasMore' and 'offset'.
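+
+        A sketch of the offset-paging contract this loop relies on (values
+        are hypothetical):
+
+            {'results': [...], 'hasMore': True, 'offset': 250}
+            # repeat with params['offset'] = 250 until 'hasMore' is False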
+ """ + url = f"{BASE_URL}/engagements/v1/engagements/paged" + replication_key = list(self.replication_keys['engagements'])[0] + params = {'limit': 250} + records = [] + + has_more = True + while has_more: + + response = self.get(url, params=params) + for result in response['results']: + if result['engagement'][replication_key] >= self.start_date: + result['engagement_id'] = result['engagement']['id'] + result['lastUpdated'] = result['engagement']['lastUpdated'] + records.append(result) + + has_more = response['hasMore'] + params['offset'] = response['offset'] + + return records + + def _get_forms_by_pk(self, form_id): + """ + Get a specific forms record using the 'form_guid'. + :params form_id: the 'form_guid' value + """ + url = f"{BASE_URL}/forms/v2/forms/{form_id}" + response = self.get(url) + + return response + + def get_forms(self): + """ + Get all forms. + """ + url = f"{BASE_URL}/forms/v2/forms" + replication_key = list(self.replication_keys['forms'])[0] + records = [] + + response = self.get(url) + records.extend([record for record in response + if record[replication_key] >= self.start_date]) + + return records + + def get_owners(self): + """ + Get all owners. + """ + url = f"{BASE_URL}/owners/v2/owners" + records = self.get(url) + transformed_records = self.datatype_transformations('owners', records) + return transformed_records + + def get_subscription_changes(self, since=''): + """ + Get all subscription_changes from 'since' date by paginating using 'hasMore' and 'offset'. + Default since date is one week ago + """ + url = f"{BASE_URL}/email/public/v1/subscriptions/timeline" + params = dict() + records = [] + replication_key = list(self.replication_keys['subscription_changes'])[0] + if not since: + since = self.start_date_strf + + if not isinstance(since, datetime.datetime): + since = datetime.datetime.strptime(since, self.START_DATE_FORMAT) + since = str(since.timestamp() * 1000).split(".")[0] + # copied overparams = {'properties': ["createdate", "hs_lastmodifieddate"]} + has_more = True + while has_more: + response = self.get(url, params=params) + has_more = response['hasMore'] + params['offset'] = response['offset'] + for record in response['timeline']: + # Future Testing TDL-16166 | Investigate difference between timestamp and startTimestamp + # this won't be feasible until BUG_TDL-14938 is addressed + if int(since) <= record['timestamp']: + records.append(record) + + return records + + def _get_workflows_by_pk(self, workflow_id=''): + """Get a specific workflow by pk value""" + url = f"{BASE_URL}/automation/v3/workflows/{workflow_id}" + + response = self.get(url) + + return response + + def get_workflows(self): + """ + Get all workflows. + """ + url = f"{BASE_URL}/automation/v3/workflows/" + replication_key = list(self.replication_keys['workflows'])[0] + records = [] + + response = self.get(url) + + records.extend([record for record in response['workflows'] + if record[replication_key] >= self.start_date]) + return records + + def _get_tickets_by_pk(self, ticket_id): + """ + Get a specific ticket by pk value + HubSpot API https://developers.hubspot.com/docs/api/crm/tickets + """ + url = f"{BASE_URL}/crm/v4/objects/tickets/{ticket_id}?associations=contact,company,deals" + response = self.get(url) + return response + + def get_tickets_properties(self): + """ + Get tickets properties. 
+        HubSpot API https://developers.hubspot.com/docs/api/crm/tickets
+        """
+        url = f"{BASE_URL}/crm/v3/properties/tickets"
+        records = self.get(url)
+
+        return ",".join([record["name"] for record in records["results"]])
+
+    def get_tickets(self):
+        """
+        Get all tickets.
+        HubSpot API https://developers.hubspot.com/docs/api/crm/tickets
+        """
+        url = f"{BASE_URL}/crm/v4/objects/tickets"
+        replication_key = list(self.replication_keys["tickets"])[0]
+        records = []
+
+        params = {"limit": 100,
+                  "associations": "contact,company,deals",
+                  'properties': self.get_tickets_properties()}
+        while True:
+            response = self.get(url, params=params)
+
+            records.extend([record
+                            for record in response["results"]
+                            if record[replication_key] >= self.start_date_strf.replace('.Z', '.000Z')])
+
+            if not response.get("paging"):
+                break
+            params["after"] = response.get("paging").get("next").get("after")
+
+        records = self.denest_properties('tickets', records)
+        return records
+
+    ##########################################################################
+    ### CREATE
+    ##########################################################################
+
+    def create(self, stream, company_ids=[], subscriptions=[], times=1):
+        """Dispatch create to make tests clean."""
+
+        # Resets the access_token if the expiry time is less than or equal to the current time
+        if self.CONFIG["token_expires"] <= datetime.datetime.utcnow():
+            self.acquire_access_token_from_refresh_token()
+
+        if stream == 'forms':
+            return self.create_forms()
+        elif stream == 'owners':
+            return self.create_owners()
+        elif stream == 'companies':
+            return self.create_companies()
+        elif stream == 'contact_lists':
+            return self.create_contact_lists()
+        elif stream == 'contacts_by_company':
+            return self.create_contacts_by_company(company_ids, times=times)
+        elif stream == 'engagements':
+            return self.create_engagements()
+        elif stream == 'campaigns':
+            return self.create_campaigns()
+        elif stream == 'deals':
+            return self.create_deals()
+        elif stream == 'workflows':
+            return self.create_workflows()
+        elif stream == 'contacts':
+            return self.create_contacts()
+        elif stream == 'deal_pipelines':
+            return self.create_deal_pipelines()
+        elif stream == 'email_events':
+            LOGGER.warn(
+                f"TEST CLIENT | Calling the create_subscription_changes method to generate {stream} records"
+            )
+            return self.create_subscription_changes()
+        elif stream == 'subscription_changes':
+            return self.create_subscription_changes(subscriptions, times)
+        elif stream == 'tickets':
+            return self.create_tickets()
+        else:
+            raise NotImplementedError(f"There is no create_{stream} method in this dispatch!")
+
+    def create_contacts(self):
+        """
+        Generate a single contacts record.
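+
+        Typical usage goes through the dispatcher above rather than calling this
+        method directly; a minimal sketch (the start date is hypothetical):
+
+            client = TestClient(start_date='2021-05-02T00:00:00Z')
+            records = client.create('contacts')  # routes here, returns [record]
+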
+ Hubspot API https://legacydocs.hubspot.com/docs/methods/contacts/create_contact + """ + record_uuid = str(uuid.uuid4()).replace('-', '') + + url = f"{BASE_URL}/contacts/v1/contact" + data = { + "properties": [ + { + "property": "email", + "value": f"{record_uuid}@stitchdata.com" + }, + { + "property": "firstname", + "value": "Yusaku" + }, + { + "property": "lastname", + "value": "Kasahara" + }, + { + "property": "website", + "value": "http://app.stitchdata.com" + }, + { + "property": "phone", + "value": "555-122-2323" + }, + { + "property": "address", + "value": "25 First Street" + }, + { + "property": "city", + "value": "Cambridge" + }, + { + "property": "state", + "value": "MA" + }, + { + "property": "zip", + "value": "02139" + } + ] + } + + # generate a contacts record + response = self.post(url, data) + records = [response] + + get_url = f"{BASE_URL}/contacts/v1/contact/vid/{response['vid']}/profile" + params = {'includeVersion': True} + get_resp = self.get(get_url, params=params) + + converted_versionTimestamp = self.BaseTest.datetime_from_timestamp( + get_resp['versionTimestamp'] / 1000, self.BOOKMARK_DATE_FORMAT + ) + get_resp['versionTimestamp'] = converted_versionTimestamp + records = self.denest_properties('contacts', [get_resp]) + + return records + + def create_campaigns(self): + """ + Couldn't find endpoint... + """ + # record_uuid = str(uuid.uuid4()).replace('-', '') + + # url = f"{BASE_URL}" + # data = {} + # generate a record + # response = self.post(url, data) + # records = [response] + # return records + raise NotImplementedError("No endpoint available in hubspot api.") + + def create_companies(self): + """ + It takes about 6 seconds after the POST for the created record to be caught by the next GET. + This is intended for generating one record for companies. + HubSpot API https://legacydocs.hubspot.com/docs/methods/companies/create_company + """ + record_uuid = str(uuid.uuid4()).replace('-', '') + + url = f"{BASE_URL}/companies/v2/companies/" + data = {"properties": [{"name": "name", "value": f"Company Name {record_uuid}"}, + {"name": "description", "value": "company description"}]} + + # generate a record + response = self.post(url, data) + records = [response] + return records + + def create_contact_lists(self): + """ + HubSpot API https://legacydocs.hubspot.com/docs/methods/lists/create_list + + NB: This generates a list based on a 'twitterhandle' filter. There are many + different filters, but at the time of implementation it did not seem that + using different filters would result in any new fields. 
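+
+        A sketch of how a second filter group could be added, assuming the
+        legacy list-filter semantics (groups OR'd together, conditions within
+        a group AND'd; the 'city' filter here is hypothetical):
+
+            "filters": [[{"operator": "EQ", "value": "x", "property": "twitterhandle", "type": "string"}],
+                        [{"operator": "EQ", "value": "y", "property": "city", "type": "string"}]]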
+ """ + record_uuid = str(uuid.uuid4()).replace('-', '') + + url = f"{BASE_URL}/contacts/v1/lists/" + data = { + "name": f"tweeters{record_uuid}", + "dynamic": True, + "filters": [ + [{ + "operator": "EQ", + "value": f"@hubspot{record_uuid}", + "property": "twitterhandle", + "type": "string" + }] + ] + } + # generate a record + response = self.post(url, data) + records = [response] + return records + + def create_contacts_by_company(self, company_ids=[], contact_records=[], times=1): + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/associate-objects + """ + url = f"{BASE_URL}/crm-associations/v1/associations" + if not company_ids: + company_ids = [company['companyId'] for company in self.get_companies()] + if not contact_records: + contact_records = self.get_contacts() + + records = [] + for _ in range(times): + for company_id in set(company_ids): + for contact in contact_records: + # look for a contact that is not already in the contacts_by_company list + if contact['vid'] not in [record['contact-id'] for record in records]: + contact_id = contact['vid'] + data = { + "fromObjectId": company_id, + "toObjectId": contact_id, + "category": "HUBSPOT_DEFINED", + "definitionId": 2 + } + # generate a record + self.put(url, data) + record = {'company-id': company_id, 'contact-id': contact_id} + records.append(record) + break + + if records: + break + + return records + + def create_deal_pipelines(self): + """ + HubSpot API + https://legacydocs.hubspot.com/docs/methods/pipelines/create_new_pipeline + """ + timestamp1 = str(datetime.datetime.now().timestamp()).replace(".", "") + timestamp2 = str(datetime.datetime.now().timestamp()).replace(".", "") + url = f"{BASE_URL}/crm-pipelines/v1/pipelines/deals" + data = { + "pipelineId": timestamp1, + "label": f"API test ticket pipeline {timestamp1}", + "displayOrder": 2, + "active": True, + "stages": [ + { + "stageId": f"example_stage {timestamp1}", + "label": f"Example stage{timestamp1}", + "displayOrder": 1, + "metadata": { + "probability": 0.5 + } + }, + { + "stageId": f"another_example_stage{timestamp2}", + "label": f"Another example stage{timestamp2}", + "displayOrder": 2, + "metadata": { + "probability": 1.0 + } + } + ] + } + + # generate a record + response = self.post(url, data) + records = [response] + return records + + def create_deals(self): + """ + HubSpot API https://legacydocs.hubspot.com/docs/methods/deals/create_deal + + NB: We are currently using the 'default' pipeline and a single stage. This + is intentional so that we do not accidentally use a pipeline that may be deleted. 
+ """ + record_uuid = str(uuid.uuid4()).replace('-', '') + + url = f"{BASE_URL}/deals/v1/deal/" + data = { + "associations": { + "associatedCompanyIds": [ + 6804176293 + ], + "associatedVids": [ + 2304 + ] + }, + "properties": [ + { + "value": "Tim's Newer Deal", + "name": "dealname" + }, + { + "value": "appointmentscheduled", + "name": "dealstage" + }, + { + "value": "default", + "name": "pipeline" + }, + { + "value": "98621200", + "name": "hubspot_owner_id" + }, + { + "value": 1409443200000, + "name": "closedate" + }, + { + "value": "60000", + "name": "amount" + }, + { + "value": "newbusiness", + "name": "dealtype" + } + ] + } + + # generate a record + response = self.post(url, data) + records = [response] + return records + + def create_tickets(self): + """ + HubSpot API https://developers.hubspot.com/docs/api/crm/tickets + """ + url = f"{BASE_URL}/crm/v4/objects/tickets" + record_uuid = str(uuid.uuid4()).replace('-', '') + data = { + "properties": { + "content": f"Created for testing purpose - {record_uuid}", + "hs_pipeline": "0", + "hs_pipeline_stage": "1", + "hs_ticket_priority": "MEDIUM", + "subject": f"Sample ticket name - {record_uuid}" + } + } + + # generate a record + response = self.post(url, data) + return [response] + + def create_email_events(self): + """ + HubSpot API https://legacydocs.hubspot.com/docs/methods/email/email_events_overview + + We are able to create email_events by updating email subscription status with a PUT (create_subscription_changes()). + If trying to expand data for other email_events, manually creating data and pinning start_date for a connection is + the preferred approach. We do not currently rely on this approach. + """ + + raise NotImplementedError( + "Use create_subscription_changes instead to create records for email_events stream") + + def create_engagements(self): + """ + HubSpot API https://legacydocs.hubspot.com/docs/methods/engagements/create_engagement + NB: Dependent on valid (currently hardcoded) companyId, and ownerId. 
+ THIS IS A POTENTIAL POINT OF INSTABILITY FOR THE TESTS + """ + record_uuid = str(uuid.uuid4()).replace('-', '') + + # gather all contacts and randomly choose one that has not hit the limit + contact_records = self.get_contacts() + contact_ids = [contact['vid'] + for contact in contact_records + if contact['vid'] != 2304] # contact 2304 has hit the 10,000 assoc limit + contact_id = random.choice(contact_ids) + + url = f"{BASE_URL}/engagements/v1/engagements" + data = { + "engagement": { + "active": True, + "ownerId": 98621200, + "type": "NOTE", + "timestamp": 1409172644778 + }, + "associations": { + "contactIds": [contact_id], + "companyIds": [6804176293], + "dealIds": [], + "ownerIds": [], + "ticketIds": [] + }, + "attachments": [ + { + "id": 4241968539 + } + ], + "metadata": { + "body": "note body" + } + } + + # generate a record + response = self.post(url, data) + response['engagement_id'] = response['engagement']['id'] + + records = [response] + return records + + def create_forms(self): + """ + HubSpot API https://legacydocs.hubspot.com/docs/methods/forms/v2/create_form + """ + record_uuid = str(uuid.uuid4()).replace('-', '') + + url = f"{BASE_URL}/forms/v2/forms" + data = { + "name": f"DemoForm{record_uuid}", + "action": "", + "method": "", + "cssClass": "", + "redirect": "", + "submitText": "Submit", + "followUpId": "", + "notifyRecipients": "", + "leadNurturingCampaignId": "", + "formFieldGroups": [ + { + "fields": [ + { + "name": "firstname", + "label": "First Name", + "type": "string", + "fieldType": "text", + "description": "", + "groupName": "", + "displayOrder": 0, + "required": False, + "selectedOptions": [], + "options": [], + "validation": { + "name": "", + "message": "", + "data": "", + "useDefaultBlockList": False + }, + "enabled": True, + "hidden": False, + "defaultValue": "", + "isSmartField": False, + "unselectedLabel": "", + "placeholder": "" + } + ], + "default": True, + "isSmartGroup": False + }, + { + "fields": [ + { + "name": "lastname", + "label": "Last Name", + "type": "string", + "fieldType": "text", + "description": "", + "groupName": "", + "displayOrder": 1, + "required": False, + "selectedOptions": [], + "options": [], + "validation": { + "name": "", + "message": "", + "data": "", + "useDefaultBlockList": False + }, + "enabled": True, + "hidden": False, + "defaultValue": "", + "isSmartField": False, + "unselectedLabel": "", + "placeholder": "" + } + ], + "default": True, + "isSmartGroup": False + }, + # KDS: Removed due to INVALID_FORM_FIELDS error. + # { + # "fields": [ + # { + # "name": "adress_1", + # "label": "Adress 1", + # "type": "string", + # "fieldType": "text", + # "description": "", + # "groupName": "", + # "displayOrder": 2, + # "required": False, + # "selectedOptions": [], + # "options": [], + # "validation": { + # "name": "", + # "message": "", + # "data": "", + # "useDefaultBlockList": False + # }, + # "enabled": True, + # "hidden": False, + # "defaultValue": "", + # "isSmartField": False, + # "unselectedLabel": "", + # "placeholder": "" + # } + # ], + # "default": True, + # "isSmartGroup": False + # } + ], + "performableHtml": "", + "migratedFrom": "ld", + "ignoreCurrentValues": False, + "metaData": [], + "deletable": True + } + + # generate a record + response = self.post(url, data) + records = [response] + return records + + def create_owners(self): + """ + HubSpot API The Owners API is read-only. Owners can only be created in HubSpot. + """ + raise NotImplementedError( + "Only able to create owners from web app manually. 
No api endpoint exists.") + + def create_subscription_changes(self, subscriptions=[], times=1): + """ + HubSpot API https://legacydocs.hubspot.com/docs/methods/email/update_status + + NB: This will update email_events as well. + """ + # by default, a new subscription change will be created from a previous subscription change from one week ago as defined in the get + if subscriptions == []: + subscriptions = self.get_subscription_changes() + subscription_id_list = [[change.get('subscriptionId') for change in subscription['changes']] + for subscription in subscriptions] + count = 0 + email_records = [] + subscription_records = [] + LOGGER.info(f"creating {times} records") + + for item in subscription_id_list: + if count < times: + # if item[0] + record_uuid = str(uuid.uuid4()).replace('-', '') + recipient = record_uuid + "@stitchdata.com" + url = f"{BASE_URL}/email/public/v1/subscriptions/{recipient}" + data = { + "subscriptionStatuses": [ + { + "id": item[0], # a_sub_id, + "subscribed": True, + "optState": "OPT_IN", + "legalBasis": "PERFORMANCE_OF_CONTRACT", + "legalBasisExplanation": "We need to send them these emails as part of our agreement with them." + } + ] + } + # generate a record + response = self.put(url, data) + + # Cleanup this method once BUG_TDL-14938 is addressed + # The intention is for this method to return both of the objects that it creates with this put + + email_event = self.get_email_events(recipient=recipient) + # subscriptions = self.get_subscription_changes() + # if len(email_event) > 1 or len(subscription_change) > 1: + # raise RuntimeError( + # "Expected this change to generate 1 email_event and 1 subscription_change only. " + # "Generate {len(email_event)} email_events and {len(subscription_changes)} subscription_changes." 
+                #     )
+                email_records.extend(email_event)
+                # subscription_records.append(subscription_change)
+                count += 1
+
+        return email_records  # , subscription_records
+
+    def create_workflows(self):
+        """
+        HubSpot API https://legacydocs.hubspot.com/docs/methods/workflows/v3/create_workflow
+        """
+        record_uuid = str(uuid.uuid4()).replace('-', '')
+
+        url = f"{BASE_URL}/automation/v3/workflows"
+        data = {
+            "name": "Test Workflow",
+            "type": "DRIP_DELAY",
+            "onlyEnrollsManually": True,
+            "enabled": True,
+            "actions": [
+                {
+                    "type": "DELAY",
+                    "delayMillis": 3600000
+                },
+                {
+                    "newValue": "HubSpot",
+                    "propertyName": "company",
+                    "type": "SET_CONTACT_PROPERTY"
+                },
+                {
+                    "type": "WEBHOOK",
+                    "url": "https://www.myintegration.com/webhook.php",
+                    "method": "POST",
+                    "authCreds": {
+                        "user": "user",
+                        "password": "password"
+                    }
+                }
+            ]
+        }
+
+        # generate a record
+        response = self.post(url, data)
+        records = [response]
+        return records
+
+    ##########################################################################
+    ### Updates
+    ##########################################################################
+
+    def update(self, stream, record_id):
+
+        # Resets the access_token if the expiry time is less than or equal to the current time
+        if self.CONFIG["token_expires"] <= datetime.datetime.utcnow():
+            self.acquire_access_token_from_refresh_token()
+
+        if stream == 'companies':
+            return self.update_companies(record_id)
+        elif stream == 'contacts':
+            return self.update_contacts(record_id)
+        elif stream == 'contact_lists':
+            return self.update_contact_lists(record_id)
+        elif stream == 'deal_pipelines':
+            return self.update_deal_pipelines(record_id)
+        elif stream == 'deals':
+            return self.update_deals(record_id)
+        elif stream == 'forms':
+            return self.update_forms(record_id)
+        elif stream == 'engagements':
+            return self.update_engagements(record_id)
+        elif stream == 'tickets':
+            return self.update_tickets(record_id)
+        else:
+            raise NotImplementedError(f"Test client does not have an update method for {stream}")
+
+    def update_workflows(self, workflow_id, contact_email):
+        """
+        Update a workflow by enrolling a contact in the workflow.
+        Hubspot API https://legacydocs.hubspot.com/docs/methods/workflows/add_contact
+
+        NB: Attempted to enroll a contact, but this did not change anything on the record. Enrollment is handled by
+            settings which are fields on a workflows record. The actual contacts' enrollment is not part of this record.
+        """
+
+        raise NotImplementedError("No endpoint in hubspot api for updating workflows.")
+
+    def updated_subscription_changes(self, subscription_id):
+        return self.create_subscription_changes(subscription_id)
+
+    def update_companies(self, company_id):
+        """
+        Update a company by changing its description.
+        :param company_id: the primary key value of the company to update
+        :return: the updated record using the _get_company_by_id
+
+        Hubspot API https://legacydocs.hubspot.com/docs/methods/companies/update_company
+        """
+        url = f"{BASE_URL}/companies/v2/companies/{company_id}"
+
+        record_uuid = str(uuid.uuid4()).replace('-', '')
+        data = {
+            "properties": [
+                {
+                    "name": "description",
+                    "value": f"An updated description {record_uuid}"
+                }
+            ]
+        }
+        self.put(url, data)
+
+        record = self._get_company_by_id(company_id)
+
+        return record
+
+    def update_contacts(self, vid):
+        """
+        Update a single contact record with a new email.
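+
+        Like the creates, updates are normally reached via the dispatcher; a
+        minimal sketch (the vid is hypothetical):
+
+            updated = client.update('contacts', record_id=101)  # routes here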
+        Hubspot API https://legacydocs.hubspot.com/docs/methods/contacts/update_contact
+
+        :param vid: the primary key value of the record to update
+        :return: the updated record using the _get_contacts_by_pks method
+        """
+        url = f"{BASE_URL}/contacts/v1/contact/vid/{vid}/profile"
+
+        record_uuid = str(uuid.uuid4()).replace('-', '')
+        data = {
+            "properties": [
+                {
+                    "property": "email",
+                    "value": f"{record_uuid}@stitchdata.com"
+                },
+                {
+                    "property": "firstname",
+                    "value": "Updated"
+                },
+                {
+                    "property": "lastname",
+                    "value": "Record"
+                },
+                {
+                    "property": "lifecyclestage",
+                    "value": "customer"
+                }
+            ]
+        }
+        _ = self.post(url, data=data)
+
+        record = self._get_contacts_by_pks(pks=[vid])
+
+        return record
+
+    def update_contact_lists(self, list_id):
+        """
+        Update a single contact list.
+        Hubspot API https://legacydocs.hubspot.com/docs/methods/lists/update_list
+
+        :param list_id: the primary key value of the record to update
+        :return: the updated record using the get_contact_lists method
+        """
+        url = f"{BASE_URL}/contacts/v1/lists/{list_id}"
+
+        record_uuid = str(uuid.uuid4()).replace('-', '')
+        data = {"name": f"Updated {record_uuid}"}
+
+        _ = self.post(url, data=data)
+
+        record = self.get_contact_lists(since='', list_id=list_id)
+
+        return record
+
+    def update_deal_pipelines(self, pipeline_id):
+        """
+        Update a deal_pipeline record by changing its label.
+        :param pipeline_id: the primary key value of the deal_pipeline to update
+        :return: the updated record from get_deal_pipelines
+        """
+        url = f"{BASE_URL}/crm-pipelines/v1/pipelines/deals/{pipeline_id}"
+
+        record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
+        data = {
+            "label": f"Updated {record_uuid}",
+            "displayOrder": 4,
+            "active": True,
+            "stages": [
+                {
+                    "stageId": record_uuid,
+                    "label": record_uuid,
+                    "displayOrder": 1,
+                    "metadata": {
+                        "probability": 0.5
+                    }
+                },
+            ]
+        }
+
+        _ = self.put(url, data=data)
+
+        deal_pipelines = self.get_deal_pipelines()
+        record = [pipeline for pipeline in deal_pipelines
+                  if pipeline['pipelineId'] == pipeline_id][0]
+
+        return record
+
+    def update_deals(self, deal_id):
+        """
+        HubSpot API https://legacydocs.hubspot.com/docs/methods/deals/update_deal
+
+        :param deal_id: the pk value of the deal record to update
+        :return: the updated deal record using a PUT and the results from a GET
+        """
+        url = f"{BASE_URL}/deals/v1/deal/{deal_id}"
+
+        record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
+        data = {
+            "properties": [
+                {
+                    "value": f"Updated {record_uuid}",
+                    "name": "dealname"
+                },
+            ]
+        }
+
+        # generate a record
+        _ = self.put(url, data)
+
+        response = self._get_deals_by_pk(deal_id)
+
+        return response
+
+    def update_forms(self, form_id):
+        """
+        Hubspot API https://legacydocs.hubspot.com/docs/methods/forms/v2/update_form
+
+        :param form_id: the pk value of the form record to update
+        :return: the updated form record using the GET endpoint
+        """
+        url = f"{BASE_URL}/forms/v2/forms/{form_id}"
+        record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
+
+        data = {
+            "name": f"Updated {record_uuid}"
+        }
+        _ = self.put(url, data=data)
+
+        response = self._get_forms_by_pk(form_id)
+
+        return response
+
+    def update_owners(self):
+        """
+        HubSpot API The Owners API is read-only. Owners can only be updated in HubSpot.
+        """
+        raise NotImplementedError(
+            "Only able to update owners from web app manually. No API endpoint in hubspot.")
+
+    def update_campaigns(self):
+        """
+        HubSpot API The Campaigns API is read-only. Campaigns can only be updated in HubSpot.
+        """
+        raise NotImplementedError(
+            "Only able to update campaigns from web app manually. No API endpoint in hubspot.")
+
+    def update_engagements(self, engagement_id):
+        """
+        Hubspot API https://legacydocs.hubspot.com/docs/methods/engagements/update_engagement-patch
+
+        :param engagement_id: the pk value of the engagement record to update
+        :return: the updated record using the _get_engagements_by_pk method
+        """
+        url = f"{BASE_URL}/engagements/v1/engagements/{engagement_id}"
+
+        record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
+        data = {
+            "metadata": {
+                "body": f"Updated {record_uuid}"
+            }
+        }
+
+        self.patch(url, data)
+
+        record = self._get_engagements_by_pk(engagement_id)
+
+        return record
+
+    def update_tickets(self, ticket_id):
+        """
+        Hubspot API https://developers.hubspot.com/docs/api/crm/tickets
+
+        :param ticket_id: the pk value of the ticket record to update
+        :return: the updated record using the _get_tickets_by_pk method
+        """
+        url = f"{BASE_URL}/crm/v4/objects/tickets/{ticket_id}"
+
+        record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
+        data = {
+            "properties": {
+                "subject": f"update record for testing - {record_uuid}"
+            }
+        }
+
+        self.patch(url, data)
+
+        return self._get_tickets_by_pk(ticket_id)
+
+    ##########################################################################
+    ### Deletes
+    ##########################################################################
+
+    def cleanup(self, stream, records, count=10):
+
+        # Resets the access_token if the expiry time is less than or equal to the current time
+        if self.CONFIG["token_expires"] <= datetime.datetime.utcnow():
+            self.acquire_access_token_from_refresh_token()
+
+        if stream == 'deal_pipelines':
+            self.delete_deal_pipelines(records, count)
+        elif stream == 'contact_lists':
+            self.delete_contact_lists(records, count)
+        else:
+            raise NotImplementedError(f"No delete method implemented for {stream}.")
+
+    def delete_contact_lists(self, records=[], count=10):
+        """
+        https://legacydocs.hubspot.com/docs/methods/lists/delete_list
+        """
+        if not records:
+            records = self.get_contact_lists()
+
+        record_ids_to_delete = [record['listId'] for record in records]
+        if len(record_ids_to_delete) == 1 or \
+           len(record_ids_to_delete) <= count:
+            raise RuntimeError(
+                "delete count is greater than or equal to the number of existing records for contact_lists, "
+                "need to have at least one record remaining"
+            )
+        for record_id in record_ids_to_delete[:count]:
+            url = f"{BASE_URL}/contacts/v1/lists/{record_id}"
+
+            self.delete(url)
+
+    def delete_deal_pipelines(self, records=[], count=10):
+        """
+        Delete older records based on the timestamp primary key.
+        https://legacydocs.hubspot.com/docs/methods/pipelines/delete_pipeline
+        """
+        if not records:
+            records = self.get_deal_pipelines()
+
+        record_ids_to_delete = [record['pipelineId'] for record in records]
+        if len(record_ids_to_delete) == 1 or \
+           len(record_ids_to_delete) <= count:
+            raise RuntimeError(
+                "delete count is greater than or equal to the number of existing records for deal_pipelines, "
+                "need to have at least one record remaining"
+            )
+        for record_id in record_ids_to_delete:
+            if record_id == 'default' or len(
+                    record_id) > 16:  # not a timestamp, not made by this client
+                continue  # skip
+
+            url = f"{BASE_URL}/crm-pipelines/v1/pipelines/deals/{record_id}"
+            self.delete(url)
+
+            count -= 1
+            if count == 0:
+                return
+
+    ##########################################################################
+    ### OAUTH
+    ##########################################################################
+
+    def acquire_access_token_from_refresh_token(self):
+        """
+        NB: This will need to be updated if authorization is ever updated in the tap.
We + attempted to import this from the tap to lessen the maintenance burden, but we + hit issues with the relative import. + """ + payload = { + "grant_type": "refresh_token", + "redirect_uri": self.CONFIG['redirect_uri'], + "refresh_token": self.CONFIG['refresh_token'], + "client_id": self.CONFIG['client_id'], + "client_secret": self.CONFIG['client_secret'], + } + + response = requests.post(BASE_URL + "/oauth/v1/token", data=payload) + response.raise_for_status() + auth = response.json() + self.CONFIG['access_token'] = auth['access_token'] + self.CONFIG['refresh_token'] = auth['refresh_token'] + self.CONFIG['token_expires'] = ( + datetime.datetime.utcnow() + + datetime.timedelta(seconds=auth['expires_in'] - 600)) + self.HEADERS = {'Authorization': f"Bearer {self.CONFIG['access_token']}"} + LOGGER.info(f"TEST CLIENT | Token refreshed. Expires at {self.CONFIG['token_expires']}") + + def __init__(self, start_date=''): + self.BaseTest = HubspotBaseTest() + self.replication_keys = self.BaseTest.expected_replication_keys() + self.CONFIG = self.BaseTest.get_credentials() + self.CONFIG.update(self.BaseTest.get_properties()) + + self.start_date_strf = start_date if start_date else self.CONFIG['start_date'] + self.start_date = datetime.datetime.strptime( + self.start_date_strf, self.BaseTest.START_DATE_FORMAT + ).timestamp() * 1000 + + self.acquire_access_token_from_refresh_token() + + contact_lists_records = self.get_contact_lists(since='all') + deal_pipelines_records = self.get_deal_pipelines() + stream_limitations = {'deal_pipelines': [100, deal_pipelines_records], + 'contact_lists': [1500, contact_lists_records]} + + for stream, limits in stream_limitations.items(): + max_record_count, records = limits + pipeline_count = len(records) + if (max_record_count - pipeline_count) / max_record_count <= 0.1: # at/above 90% of record limit + delete_count = int(max_record_count / 2) + self.cleanup(stream, records, delete_count) + LOGGER.info(f"TEST CLIENT | {delete_count} records deleted from {stream}") diff --git a/archive/tests/client_tester.py b/archive/tests/client_tester.py new file mode 100644 index 0000000..a03db6d --- /dev/null +++ b/archive/tests/client_tester.py @@ -0,0 +1,280 @@ +import json +import time +from client import TestClient +from base import HubspotBaseTest + +class TestHubspotTestClient(HubspotBaseTest): + """ + Test the basic functionality of our Test Client. This is a tool for sanity checks, nothing more. + + To check an individual crud method, uncomment the corresponding test case below, and execute this file + as if it is a normal tap-tester test via bin/run-test. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.test_client = TestClient(self.get_properties()['start_date']) + + ########################################################################## + ### TESTING CREATES + ########################################################################## + + # def test_contacts_create(self): + # # Testing contacts Post + # old_records = self.test_client.get_contacts() + # our_record = self.test_client.create_contacts() + # new_records = self.test_client.get_contacts() + # assert len(old_records) < len(new_records), \ + # f"Before contacts post found {len(old_records)} records. 
After post found {len(new_records)} records" + + # def test_contacts_create_stability(self): + # old_records = self.test_client.get_contacts() + # our_record = self.test_client.create_contacts() + # responses = [] + # for i in range(10): + # new_records = self.test_client.get_contacts() + # responses.append(new_records) + # time.sleep(1) + # all_versions = [record['versionTimestamp'] for response in responses + # for record in response if record['vid'] == our_record[0]['vid']] + # from pprint import pprint as pp + # pp(all_versions) + + # def test_companies_create(self): + # # Testing companies Post + + # old_records = self.test_client.get_companies('2021-08-25T00:00:00.000000Z') + # our_record = self.test_client.create_companies() + # now = time.time() + # time.sleep(6) + + # new_records = self.test_client.get_companies('2021-08-25T00:00:00.000000Z') + # time_for_get = time.time()-now + # print(time_for_get) + + # assert len(old_records) < len(new_records), \ + # f"Before companies post found {len(old_records)} records. After post found {len(new_records)} records" + + # def test_contact_lists_create(self): + # # Testing contact_lists POST + + # old_records = self.test_client.get_contact_lists() + # our_record = self.test_client.create_contact_lists() + # new_records = self.test_client.get_contact_lists() + + # assert len(old_records) < len(new_records), \ + # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" + + + # def test_contacts_by_company_create(self): + # # Testing contacts_by_company PUT + + + # old_contact_records = self.test_client.get_contacts() + # old_company_records = self.test_client.get_companies('2021-08-25T00:00:00.000000Z') + # old_records = self.test_client.get_contacts_by_company([old_company_records[0]["companyId"]]) + # our_record = self.test_client.create_contacts_by_company() + # new_records = self.test_client.get_contacts_by_company([old_company_records[0]["companyId"]]) + # assert len(old_records) < len(new_records), \ + # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" + + + # def test_deal_pipelines_create(self): + # # Testing deal_pipelines POST + + # old_records = self.test_client.get_deal_pipelines() + # our_record = self.test_client.create_deal_pipelines() + # new_records = self.test_client.get_deal_pipelines() + # assert len(old_records) < len(new_records), \ + # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" + + # def test_deal_pipelines_deletes(self): + # # Testing deal_pipelines DELETE + # import ipdb; ipdb.set_trace() + # 1+1 + # our_record = self.test_client.create_deal_pipelines() + # old_records = self.test_client.get_deal_pipelines() + # delete_records = self.test_client.delete_deal_pipelines(1) + # new_records = self.test_client.get_deal_pipelines() + # assert len(old_records) > len(new_records), \ + # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" + + # def test_deals_create(self): + # # Testing deals POST + + # old_records = self.test_client.get_deals() + # our_record = self.test_client.create_deals() + # new_records = self.test_client.get_deals() + # assert len(old_records) < len(new_records), \ + # f"Before post found {len(old_records)} records. 
After post found {len(new_records)} records" + + + # def test_subscription_changes_and_email_events_create(self): + # # Testing subscription_changes and email_events POST + + # old_emails = self.test_client.get_email_events() + # old_subs = self.test_client.get_subscription_changes() + # our_record = self.test_client.create_subscription_changes() + # time.sleep(10) + # new_subs = self.test_client.get_subscription_changes() + # new_emails = self.test_client.get_email_events() + + # assert len(old_subs) < len(new_subs), \ + # f"Before post found {len(old_subs)} subs. After post found {len(new_subs)} subs" + # assert len(old_emails) < len(new_emails), \ + # f"Before post found {len(old_emails)} emails. After post found {len(new_emails)} emails" + # print(f"Before {len(old_subs)} subs. After found {len(new_subs)} subs") + # print(f"Before {len(old_emails)} emails. After found {len(new_emails)} emails") + + # def test_engagements_create(self): + # # Testing create_engagements POST + + # old_records = self.test_client.get_engagements() + # our_record = self.test_client.create_engagements() + # new_records = self.test_client.get_engagements() + # assert len(old_records) < len(new_records), \ + # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" + + + # def test_forms_create(self): + # # Testing create_forms POST + # old_records = self.test_client.get_forms() + # our_record = self.test_client.create_forms() + # new_records = self.test_client.get_forms() + # assert len(old_records) < len(new_records), \ + # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" + + + # def test_workflows_create(self): + # # Testing create_workflows POST + + # old_records = self.test_client.get_workflows() + # our_record = self.test_client.create_workflows() + # new_records = self.test_client.get_workflows() + # assert len(old_records) < len(new_records), \ + # f"Before post found {len(old_records)} records. 
After post found {len(new_records)} records" + + + ########################################################################## + ### TESTING UPDATES + ########################################################################## + + + # def test_workflows_update(self): # TODO This failed to change the record + # # Testing update_workflows POST + + # # grab a contact's email to use as the update + # contacts = self.test_client.get_contacts() + # for contact in contacts: + # if contact['properties'].get('email'): + # contact_email = contact['properties']['email']['value'] + # break + + # # old + # workflow = self.test_client.create('workflows') + # workflow_id = workflow[0]['id'] + # old_record = self.test_client._get_workflows_by_pk(workflow_id=workflow_id) + + + # # do the update + # our_record = self.test_client.update_workflows(workflow_id=workflow_id, contact_email=contact_email) + + # # new + # new_record = self.test_client._get_workflows_by_pk(workflow_id=workflow_id) + + # self.assertNotEqual(old_record, new_record) + + # def test_contacts_update(self): + # new_record = self.test_client.create_contacts() + # record_vid = new_record[0]['vid'] + # old_email = new_record[0]['properties']['email']['value'] + + # updated_record = self.test_client.update_contacts(record_vid) + + # self.assertNotEqual(updated_record[0]['properties']['email']['value'], old_email) + + # def test_campaigns_update(self): TODO + # """No endpoint found.""" + # self.fail("test_campaigns_update not implmented") + + # def test_companies_update(self): + # initial_record = self.test_client.create_companies() + # time.sleep(6) + # record_id = initial_record[0]['companyId'] + # initial_value = initial_record[0]['properties']['description']['value'] + + # updated_record = self.test_client.update_companies(record_id) + # updated_value = updated_record['properties']['description']['value'] + + # self.assertNotEqual(initial_value, updated_value) + + # def test_contact_lists_update(self): + # initial_record = self.test_client.create_contact_lists() + + # record_id = initial_record[0]['listId'] + # initial_value = initial_record[0]['name'] + + # updated_record = self.test_client.update_contact_lists(record_id) + # updated_value = updated_record['name'] + + # self.assertNotEqual(initial_value, updated_value) + + # def test_deal_pipelines_update(self): + # initial_record = self.test_client.get_deal_pipelines() + + # record_id = initial_record[0]['pipelineId'] + # initial_value = initial_record[0]['label'] + + # updated_record = self.test_client.update_deal_pipelines(record_id) + # updated_value = updated_record['label'] + + # self.assertNotEqual(initial_value, updated_value) + + # def test_deals_update(self): + # initial_record = self.test_client.get_deals() + + # record_id = initial_record[0]['dealId'] + # initial_value = initial_record[0]['properties']['dealname']['value'] + + # updated_record = self.test_client.update_deals(record_id) + # updated_value = updated_record['properties']['dealname']['value'] + + # self.assertNotEqual(initial_value, updated_value) + + # def test_forms_update(self): + # initial_record = self.test_client.get_forms() + + # record_id = initial_record[0]['guid'] + # initial_value = initial_record[0]['name'] + + # updated_record = self.test_client.update_forms(record_id) + # updated_value = updated_record['name'] + + # self.assertNotEqual(initial_value, updated_value) + + # def test_owners_update(self): TODO + # """No endpoint found.""" + # self.fail("test_owners_update not implmented") + + # def 
test_engagements_update(self): + # initial_record = self.test_client.get_engagements() + + # record_id = initial_record[0]['engagement_id'] + # initial_value = initial_record[0]['metadata'] + + # updated_record = self.test_client.update_engagements(record_id) + # updated_value = updated_record['metadata'] + + # self.assertNotEqual(initial_value, updated_value) + + ########################################################################## + ### TODO updates + ########################################################################## + # def test_contacts_by_company_update(self): + # pass + + # def test_email_events_update(self): + # pass + + + # def test_subscription_changes_update(self): + # pass diff --git a/archive/tests/test_hubspot_all_fields.py b/archive/tests/test_hubspot_all_fields.py new file mode 100644 index 0000000..2693fa7 --- /dev/null +++ b/archive/tests/test_hubspot_all_fields.py @@ -0,0 +1,327 @@ +import datetime + +import tap_tester.connections as connections +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner +from tap_tester import LOGGER + +from base import HubspotBaseTest +from client import TestClient + +def get_matching_actual_record_by_pk(expected_primary_key_dict, actual_records): + ret_records = [] + can_save = True + for record in actual_records: + for key, value in expected_primary_key_dict.items(): + actual_value = record[key] + if actual_value != value: + can_save = False + break + if can_save: + ret_records.append(record) + can_save = True + return ret_records + +FIELDS_ADDED_BY_TAP = { + # In 'contacts' streams 'versionTimeStamp' is not available in response of the second call. + # In the 1st call, Tap retrieves records of all contacts and from those records, it collects vids(id of contact). + # These records contain the versionTimestamp field. + # In the 2nd call, vids collected from the 1st call will be used to retrieve the whole contact record. + # Here, the records collected for detailed contact information do not contain the versionTimestamp field. + # So, we add the versionTimestamp field(fetched from 1st call records) explicitly in the record of 2nd call. 
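+    # (Illustrative) call 1 might yield {'vid': 101, 'versionTimestamp': 1626296463625, ...},
+    # while the batch read for vid 101 omits 'versionTimestamp', so the client re-attaches it.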
+ "contacts": { "versionTimestamp" } +} + +KNOWN_EXTRA_FIELDS = { + 'deals': { + # BUG_TDL-14993 | https://jira.talendforge.org/browse/TDL-14993 + # Has an value of object with key 'value' and value 'Null' + 'property_hs_date_entered_1258834', + }, +} + +KNOWN_MISSING_FIELDS = { + 'contacts':{ # BUG https://jira.talendforge.org/browse/TDL-16016 + 'property_hs_latest_source_data_2', + 'property_hs_latest_source', + 'property_hs_latest_source_data_1', + 'property_hs_timezone', + 'property_hs_latest_source_timestamp', + }, + 'contact_lists': { # BUG https://jira.talendforge.org/browse/TDL-14996 + 'authorId', + 'teamIds', + 'internal', + 'ilsFilterBranch', + 'limitExempt', + }, + 'email_events': { # BUG https://jira.talendforge.org/browse/TDL-14997 + 'portalSubscriptionStatus', + 'attempt', + 'source', + 'subscriptions', + 'sourceId', + 'replyTo', + 'suppressedMessage', + 'bcc', + 'suppressedReason', + 'cc', + }, + 'engagements': { # BUG https://jira.talendforge.org/browse/TDL-14997 + 'scheduledTasks', + }, + 'workflows': { # BUG https://jira.talendforge.org/browse/TDL-14998 + 'migrationStatus', + 'updateSource', + 'description', + 'originalAuthorUserId', + 'lastUpdatedByUserId', + 'creationSource', + 'portalId', + 'contactCounts', + }, + 'owners': { # BUG https://jira.talendforge.org/browse/TDL-15000 + 'activeSalesforceId' + }, + 'forms': { # BUG https://jira.talendforge.org/browse/TDL-15001 + 'alwaysCreateNewCompany', + 'themeColor', + 'publishAt', + 'editVersion', + 'embedVersion', + 'themeName', + 'style', + 'thankYouMessageJson', + 'createMarketableContact', + 'kickbackEmailWorkflowId', + 'businessUnitId', + 'portableKey', + 'parentId', + 'kickbackEmailsJson', + 'unpublishAt', + 'internalUpdatedAt', + 'multivariateTest', + 'publishedAt', + 'customUid', + 'isPublished', + 'paymentSessionTemplateIds', + 'selectedExternalOptions', + }, + 'companies': { # BUG https://jira.talendforge.org/browse/TDL-15003 + 'mergeAudits', + 'stateChanges', + 'isDeleted', + 'additionalDomains', + 'property_hs_analytics_latest_source', + 'property_hs_analytics_latest_source_data_2', + 'property_hs_analytics_latest_source_data_1', + 'property_hs_analytics_latest_source_timestamp', + }, + 'campaigns': { # BUG https://jira.talendforge.org/browse/TDL-15003 + 'lastProcessingStateChangeAt', + 'lastProcessingFinishedAt', + 'processingState', + 'lastProcessingStartedAt', + }, + 'deals': { # BUG https://jira.talendforge.org/browse/TDL-14999 + 'imports', + 'property_hs_num_associated_deal_splits', + 'property_hs_is_deal_split', + 'stateChanges', + 'property_hs_num_associated_active_deal_registrations', + 'property_hs_num_associated_deal_registrations', + 'property_hs_analytics_latest_source', + 'property_hs_analytics_latest_source_timestamp_contact', + 'property_hs_analytics_latest_source_data_1_contact', + 'property_hs_analytics_latest_source_timestamp', + 'property_hs_analytics_latest_source_data_1', + 'property_hs_analytics_latest_source_contact', + 'property_hs_analytics_latest_source_company', + 'property_hs_analytics_latest_source_data_1_company', + 'property_hs_analytics_latest_source_data_2_company', + 'property_hs_analytics_latest_source_data_2', + 'property_hs_analytics_latest_source_data_2_contact', + }, + 'subscription_changes':{ + 'normalizedEmailId' + } +} + + +class TestHubspotAllFields(HubspotBaseTest): + """Test that with all fields selected for a stream we replicate data as expected""" + @staticmethod + def name(): + return "tt_hubspot_all_fields_dynamic" + + def streams_under_test(self): + """expected 
streams minus the streams not under test""" + return self.expected_streams().difference({ + 'owners', + 'subscription_changes', # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 + }) + + def setUp(self): + self.maxDiff = None # see all output in failure + + test_client = TestClient(start_date=self.get_properties()['start_date']) + self.expected_records = dict() + streams = self.streams_under_test() + stream_to_run_last = 'contacts_by_company' + if stream_to_run_last in streams: + streams.remove(stream_to_run_last) + streams = list(streams) + streams.append(stream_to_run_last) + + for stream in streams: + # Get all records + if stream == 'contacts_by_company': + company_ids = [company['companyId'] for company in self.expected_records['companies']] + self.expected_records[stream] = test_client.read(stream, parent_ids=company_ids) + else: + self.expected_records[stream] = test_client.read(stream) + + for stream, records in self.expected_records.items(): + LOGGER.info("The test client found %s %s records.", len(records), stream) + + + self.convert_datatype(self.expected_records) + + def convert_datatype(self, expected_records): + for stream, records in expected_records.items(): + for record in records: + + # convert timestamps to string formatted datetime + timestamp_keys = {'timestamp'} + for key in timestamp_keys: + timestamp = record.get(key) + if timestamp: + unformatted = datetime.datetime.fromtimestamp(timestamp/1000) + formatted = datetime.datetime.strftime(unformatted, self.BASIC_DATE_FORMAT) + record[key] = formatted + + return expected_records + + def test_run(self): + conn_id = connections.ensure_connection(self) + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + expected_streams = self.streams_under_test() + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + stream_schema + ) + + # Run sync + first_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records = runner.get_records_from_target_output() + + # Test by Stream + for stream in expected_streams: + with self.subTest(stream=stream): + + # gather expected values + replication_method = self.expected_replication_method()[stream] + primary_keys = sorted(self.expected_primary_keys()[stream]) + + # gather replicated records + actual_records = [message['data'] + for message in synced_records[stream]['messages'] + if message['action'] == 'upsert'] + + for expected_record in self.expected_records[stream]: + + primary_key_dict = {primary_key: expected_record[primary_key] for primary_key in primary_keys} + primary_key_values = list(primary_key_dict.values()) + + with self.subTest(expected_record=primary_key_dict): + # grab the replicated record that corresponds to expected_record by checking primary keys + matching_actual_records_by_pk = get_matching_actual_record_by_pk(primary_key_dict, actual_records) + if not matching_actual_records_by_pk: + LOGGER.warn("Expected %s record was not replicated: %s", + stream, primary_key_dict) + continue # skip this expected record if it isn't replicated + actual_record = matching_actual_records_by_pk[0] + + expected_keys = set(expected_record.keys()).union(FIELDS_ADDED_BY_TAP.get(stream, {})) + actual_keys = set(actual_record.keys()) + + # NB: KNOWN_MISSING_FIELDS is a 
dictionary of streams to aggregated missing fields.
+                        # We will check each expected_record to see which of the known keys is present in expectations
+                        # and then will add them to the known_missing_keys set.
+                        known_missing_keys = set()
+                        for missing_key in KNOWN_MISSING_FIELDS.get(stream, set()):
+                            if missing_key in expected_record.keys():
+                                known_missing_keys.add(missing_key)
+                                del expected_record[missing_key]
+
+                        # NB: KNOWN_EXTRA_FIELDS is a dictionary of streams to fields that should not
+                        #     be replicated but are. See the variable declaration at top of file for linked BUGs.
+                        known_extra_keys = set()
+                        for extra_key in KNOWN_EXTRA_FIELDS.get(stream, set()):
+                            known_extra_keys.add(extra_key)
+
+                        # Verify the fields in our expected record match the fields in the corresponding replicated record
+                        expected_keys_adjusted = expected_keys.union(known_extra_keys)
+                        actual_keys_adjusted = actual_keys.union(known_missing_keys)
+
+                        # NB: The following workaround is for dynamic fields on the `deals` stream that we just can't track.
+                        #     At the time of implementation there is no customer feedback indicating that these dynamic fields
+                        #     would prove useful to an end user. The ones that we replicated with the test client are specific
+                        #     to our test data. We have determined that the filtering of these fields is an expected behavior.
+
+                        # deals workaround for 'property_hs_date_entered_' fields
+                        bad_key_prefixes = {'property_hs_date_entered_', 'property_hs_date_exited_'}
+                        bad_keys = set()
+                        for key in expected_keys_adjusted:
+                            for prefix in bad_key_prefixes:
+                                if key.startswith(prefix) and key not in actual_keys_adjusted:
+                                    bad_keys.add(key)
+                        for key in actual_keys_adjusted:
+                            for prefix in bad_key_prefixes:
+                                if key.startswith(prefix) and key not in expected_keys_adjusted:
+                                    bad_keys.add(key)
+                        for key in bad_keys:
+                            if key in expected_keys_adjusted:
+                                expected_keys_adjusted.remove(key)
+                            elif key in actual_keys_adjusted:
+                                actual_keys_adjusted.remove(key)
+
+                        self.assertSetEqual(expected_keys_adjusted, actual_keys_adjusted)
+
+                        # Future Testing | TDL-16145
+                        # self.assertDictEqual(expected_record, actual_record)
+
+                # Toss out a warning if the tap replicated more records than expected
+                expected_primary_key_values = {tuple([record[primary_key]
+                                                      for primary_key in primary_keys])
+                                               for record in self.expected_records[stream]}
+                actual_records_primary_key_values = {tuple([record[primary_key]
+                                                            for primary_key in primary_keys])
+                                                     for record in actual_records}
+                # strict subset: the actual records contain entries beyond the expected set
+                if expected_primary_key_values < actual_records_primary_key_values:
+                    LOGGER.warn("Unexpected %s records replicated: %s",
+                                stream,
+                                actual_records_primary_key_values - expected_primary_key_values)
+
+
+class TestHubspotAllFieldsStatic(TestHubspotAllFields):
+    @staticmethod
+    def name():
+        return "tt_hubspot_all_fields_static"
+
+    def streams_under_test(self):
+        """expected streams minus the streams not under test"""
+        return {
+            'owners',
+            # 'subscription_changes',  # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938
+        }
+
+    def get_properties(self):
+        return {'start_date': '2021-05-02T00:00:00Z'}
diff --git a/archive/tests/test_hubspot_automatic_fields.py b/archive/tests/test_hubspot_automatic_fields.py
new file mode 100644
index 0000000..693f5ff
--- /dev/null
+++ b/archive/tests/test_hubspot_automatic_fields.py
@@ -0,0 +1,109 @@
+import tap_tester.connections as connections
+import tap_tester.menagerie as menagerie
+import tap_tester.runner as runner
+import re
+
+from base import HubspotBaseTest
+
+STATIC_DATA_STREAMS = {'owners'} + +class TestHubspotAutomaticFields(HubspotBaseTest): + @staticmethod + def name(): + return "tt_hubspot_automatic" + + def streams_to_test(self): + """streams to test""" + return self.expected_streams() - STATIC_DATA_STREAMS + + def test_run(self): + """ + Verify we can deselect all fields except when inclusion=automatic, which is handled by base.py methods + Verify that only the automatic fields are sent to the target. + """ + conn_id = connections.ensure_connection(self) + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + expected_streams = self.streams_to_test() + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + self.select_all_streams_and_fields(conn_id, catalog_entries, select_all_fields=False) + + # Include the following step in this test if/when hubspot conforms to the standards of metadata + # See bugs BUG_TDL-9939 and BUG_TDL-14938 + + # # Verify our selection resulted in no fields selected except for those with inclusion of 'automatic' + # catalogs_selection = menagerie.get_catalogs(conn_id) + # for cat in catalogs_selection: + # with self.subTest(cat=cat): + # catalog_entry = menagerie.get_annotated_schema(conn_id, cat['stream_id']) + + # # Verify the expected stream tables are selected + # selected = catalog_entry.get('annotated-schema').get('selected') + # print("Validating selection on {}: {}".format(cat['stream_name'], selected)) + # if cat['stream_name'] not in expected_streams: + # self.assertFalse(selected, msg="Stream selected, but not testable.") + # continue # Skip remaining assertions if we aren't selecting this stream + # self.assertTrue(selected, msg="Stream not selected.") + + # # Verify only automatic fields are selected + # expected_automatic_fields = self.expected_automatic_fields().get(cat['tap_stream_id']) + # selected_fields = self.get_selected_fields_from_metadata(catalog_entry['metadata']) + + # # remove replication keys + # self.assertEqual(expected_automatic_fields, selected_fields) + + # Run a sync job using orchestrator + sync_record_count = self.run_and_verify_sync(conn_id) + synced_records = runner.get_records_from_target_output() + + # Assert the records for each stream + for stream in expected_streams: + with self.subTest(stream=stream): + + # Verify that data is present + record_count = sync_record_count.get(stream, 0) + self.assertGreater(record_count, 0) + + data = synced_records.get(stream) + record_messages_keys = [set(row['data'].keys()) for row in data['messages']] + expected_keys = self.expected_automatic_fields().get(stream) + + # BUG_TDL-9939 https://jira.talendforge.org/browse/TDL-9939 Replication keys are not included as an automatic field for these streams + if stream in {'subscription_changes', 'email_events'}: + # replication keys not in the expected_keys + remove_keys = self.expected_metadata()[stream].get(self.REPLICATION_KEYS) + expected_keys = expected_keys.difference(remove_keys) + elif stream in {'engagements'}: + # engagements has a nested object 'engagement' with the automatic fields + expected_keys = expected_keys.union({'engagement'}) + # Verify that only the automatic fields are sent to the target + for actual_keys in record_messages_keys: + self.assertSetEqual(actual_keys, expected_keys, + msg=f"Expected automatic fields: {expected_keys} and nothing else." 
+ ) + + + # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 + # The subscription_changes stream does not have a valid pk to ensure no dupes are sent + if stream != 'subscription_changes': + + # make sure there are no duplicate records by using the pks + pk = self.expected_primary_keys()[stream] + pks_values = [tuple([message['data'][p] for p in pk]) for message in data['messages']] + self.assertEqual(len(pks_values), len(set(pks_values))) + + +class TestHubspotAutomaticFieldsStaticData(TestHubspotAutomaticFields): + def streams_to_test(self): + """streams to test""" + return STATIC_DATA_STREAMS + + @staticmethod + def name(): + return "tt_hubspot_automatic_static" + + def get_properties(self): + return { + 'start_date' : '2021-08-19T00:00:00Z', + } diff --git a/archive/tests/test_hubspot_bookmarks.py b/archive/tests/test_hubspot_bookmarks.py new file mode 100644 index 0000000..fa8a11f --- /dev/null +++ b/archive/tests/test_hubspot_bookmarks.py @@ -0,0 +1,248 @@ +from datetime import datetime, timedelta +from time import sleep + + +import tap_tester.connections as connections +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner + +from base import HubspotBaseTest +from client import TestClient + + +STREAMS_WITHOUT_UPDATES = {'email_events', 'contacts_by_company', 'workflows'} +STREAMS_WITHOUT_CREATES = {'campaigns', 'owners'} + + +class TestHubspotBookmarks(HubspotBaseTest): + """Ensure tap replicates new and updated records based on the replication method of a given stream. + + Create records for each stream. Run check mode, perform table and field selection, and run a sync. + Create 1 record for each stream and update 1 record for each stream prior to running a 2nd sync. + - Verify for each incremental stream you can do a sync which records bookmarks, and that the format matches expectations. + - Verify that a bookmark doesn't exist for full table streams. + - Verify the bookmark is the max value sent to the target for a given replication key. + - Verify 2nd sync respects the bookmark. 
+ """ + @staticmethod + def name(): + return "tt_hubspot_bookmarks" + + def streams_to_test(self): + """expected streams minus the streams not under test""" + + expected_streams = self.expected_streams().difference(STREAMS_WITHOUT_CREATES) + + return expected_streams.difference({ + 'subscription_changes', # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 + }) + + def get_properties(self): + return { + 'start_date' : datetime.strftime(datetime.today()-timedelta(days=3), self.START_DATE_FORMAT), + } + + def setUp(self): + self.maxDiff = None # see all output in failure + + self.test_client = TestClient(self.get_properties()['start_date']) + + def create_test_data(self, expected_streams): + + self.expected_records = {stream: [] + for stream in expected_streams} + + for stream in expected_streams - {'contacts_by_company'}: + if stream == 'email_events': + email_records = self.test_client.create(stream, times=3) + self.expected_records['email_events'] += email_records + else: + # create records, one will be updated between syncs + for _ in range(3): + record = self.test_client.create(stream) + self.expected_records[stream] += record + + if 'contacts_by_company' in expected_streams: # do last + company_ids = [record['companyId'] for record in self.expected_records['companies']] + contact_records = self.expected_records['contacts'] + for i in range(3): + record = self.test_client.create_contacts_by_company( + company_ids=company_ids, contact_records=contact_records + ) + self.expected_records['contacts_by_company'] += record + + def test_run(self): + expected_streams = self.streams_to_test() + + # generate 3 records for every stream that has a create endpoint + create_streams = expected_streams - STREAMS_WITHOUT_CREATES + self.create_test_data(create_streams) + + conn_id = connections.ensure_connection(self) + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + stream_schema + ) + + # Run sync 1 + first_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records = runner.get_records_from_target_output() + state_1 = menagerie.get_state(conn_id) + + # Create 1 record for each stream between syncs + for stream in expected_streams - {'contacts_by_company'}: + record = self.test_client.create(stream) + self.expected_records[stream] += record + if 'contacts_by_company' in expected_streams: + company_ids = [record['companyId'] for record in self.expected_records['companies'][:-1]] + contact_records = self.expected_records['contacts'][-1:] + record = self.test_client.create_contacts_by_company( + company_ids=company_ids, contact_records=contact_records + ) + self.expected_records['contacts_by_company'] += record + + + # Update 1 record from the test seutp for each stream that has an update endpoint + for stream in expected_streams - STREAMS_WITHOUT_UPDATES: + primary_key = list(self.expected_primary_keys()[stream])[0] + record_id = self.expected_records[stream][0][primary_key] + record = self.test_client.update(stream, record_id) + self.expected_records[stream].append(record) + + #run second sync + second_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records_2 = runner.get_records_from_target_output() + 
state_2 = menagerie.get_state(conn_id) + + # Test by Stream + for stream in expected_streams: + + with self.subTest(stream=stream): + + # gather expected values + replication_method = self.expected_replication_method()[stream] + primary_keys = self.expected_primary_keys()[stream] + + # setting expected records for sync 1 based on the unsorted list of records + # which does not include the created record between syncs 1 and 2 + expected_records_1 = self.expected_records[stream][:3] + + # gather replicated records + actual_record_count_2 = second_record_count_by_stream[stream] + actual_records_2 = [message['data'] + for message in synced_records_2[stream]['messages'] + if message['action'] == 'upsert'] + actual_record_count_1 = first_record_count_by_stream[stream] + actual_records_1 = [message['data'] + for message in synced_records[stream]['messages'] + if message['action'] == 'upsert'] + + if self.is_child(stream): # we will set expectations for child streams based on the parent + + parent_stream = self.expected_metadata()[stream][self.PARENT_STREAM] + parent_replication_method = self.expected_replication_method()[parent_stream] + + if parent_replication_method == self.INCREMENTAL: + + expected_record_count = 1 if stream not in STREAMS_WITHOUT_UPDATES else 2 + expected_records_2 = self.expected_records[stream][-expected_record_count:] + + # verify the record count matches our expectations for child streams with incremental parents + self.assertGreater(actual_record_count_1, actual_record_count_2) + + elif parent_replication_method == self.FULL: + + # verify the record count matches our expectations for child streams with full table parents + expected_records_2 = self.expected_records[stream] + self.assertEqual(actual_record_count_1 + 1, actual_record_count_2) + + else: + raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") + + + elif replication_method == self.INCREMENTAL: + + # NB: FOR INCREMENTAL STREAMS the tap does not replicate the replication-key for any records. + # It does functionally replicate as a standard incremental sync would but does not order + # records by replication-key value (since it does not exist on the record). To get around + # this we are putting the replication-keys on our expected records via test_client. We will + # verify the records we expect (via primary-key) are replicated prior to checking the + # replication-key values. + + # get saved states + stream_replication_key = list(self.expected_replication_keys()[stream])[0] + bookmark_1 = state_1['bookmarks'][stream][stream_replication_key] + bookmark_2 = state_2['bookmarks'][stream][stream_replication_key] + + # setting expected records knowing they are ordered by replication-key value + expected_record_count = 1 if stream not in STREAMS_WITHOUT_UPDATES else 2 + expected_records_2 = self.expected_records[stream][-expected_record_count:] + + # The following streams do not contain a proper replication-key value in the response. 
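For reference, the saved state consulted through `menagerie.get_state` above is a standard Singer state blob; for an incremental stream it is assumed to look roughly like the following (stream names and values illustrative only):

    state = {
        "currently_syncing": None,
        "bookmarks": {
            "forms": {"updatedAt": "2022-08-01T12:30:00.000000Z"},
            "contact_lists": {"updatedAt": "2022-08-02T00:00:00.000000Z"},
        },
    }

Because the bookmark values are ISO 8601 strings, the lexical comparisons against the max replication-key value in the conditional that follows are also chronological comparisons.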
+ if stream not in {"companies","deals","contacts_by_company","email_events"}: + # verify first sync bookmark value is max bookmark value + max_bk_value = actual_records_1[0].get(stream_replication_key) + for record in actual_records_1: + replication_key_value = record.get(stream_replication_key) + if max_bk_value < replication_key_value: + max_bk_value = replication_key_value + + # For a few streams, test records updated before sync may have a replication value + # greater than the bookmark value, probably due to delayed record-update pickup by HubSpot + self.assertLessEqual(bookmark_1, max_bk_value, + msg="First sync bookmark value cannot be greater than max replication-key value") + + # verify second sync bookmark value is max bookmark value + max_bk_value = actual_records_2[0].get(stream_replication_key) + for record in actual_records_2: + replication_key_value = record.get(stream_replication_key) + if max_bk_value < replication_key_value: + max_bk_value = replication_key_value + + # For a few streams, test records updated before sync may have a replication value + # greater than the bookmark value, probably due to delayed record-update pickup by HubSpot + self.assertLessEqual(bookmark_2, max_bk_value, + msg="Second sync bookmark value cannot be greater than max replication-key value") + + # verify only the new and updated records are captured by checking record counts + self.assertGreater(actual_record_count_1, actual_record_count_2) + + # verify the state was updated with incremented bookmark + if stream != 'email_events': # BUG TDL-15706 + self.assertGreater(bookmark_2, bookmark_1) + + elif replication_method == self.FULL: + expected_records_2 = self.expected_records[stream] + self.assertEqual(actual_record_count_1 + 1, actual_record_count_2) + + else: + raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") + + # verify by primary key that all expected records are replicated in sync 1 + sync_1_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_1] + expected_sync_1_pks = [tuple([record[pk] for pk in primary_keys]) + for record in expected_records_1] + for expected_pk in expected_sync_1_pks: + self.assertIn(expected_pk, sync_1_pks) + + # verify by primary key that all expected records are replicated in sync 2 + sync_2_pks = sorted([tuple([record[pk] for pk in primary_keys]) for record in actual_records_2]) + expected_sync_2_pks = sorted([tuple([record[pk] for pk in primary_keys]) + for record in expected_records_2]) + for expected_pk in expected_sync_2_pks: + self.assertIn(expected_pk, sync_2_pks) + + # verify that at least 1 record from the first sync is replicated in the 2nd sync + # to prove that the bookmarking is inclusive + if stream in {'companies', # BUG | https://jira.talendforge.org/browse/TDL-15503 + 'email_events'}: # BUG | https://jira.talendforge.org/browse/TDL-15706 + continue # skipping failures + self.assertTrue(any([expected_pk in sync_2_pks for expected_pk in expected_sync_1_pks])) diff --git a/archive/tests/test_hubspot_bookmarks_static.py b/archive/tests/test_hubspot_bookmarks_static.py new file mode 100644 index 0000000..bbbda3e --- /dev/null +++ b/archive/tests/test_hubspot_bookmarks_static.py @@ -0,0 +1,127 @@ +from datetime import datetime, timedelta +from time import sleep +import copy + +import tap_tester.connections as connections +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner + +from base import HubspotBaseTest +from client import TestClient + + +STREAMS_WITHOUT_CREATES = {'campaigns', 
'owners'} + + +class TestHubspotBookmarksStatic(HubspotBaseTest): + """Test basic bookmarking and replication for streams that do not have CRUD capability.""" + @staticmethod + def name(): + return "tt_hubspot_bookmarks_static" + + def streams_to_test(self): + """expected streams minus the streams not under test""" + return STREAMS_WITHOUT_CREATES + + def get_properties(self): + # 'start_date' : '2021-08-19T00:00:00Z' + return {'start_date' : '2017-11-22T00:00:00Z'} + + def setUp(self): + self.maxDiff = None # see all output in failure + + + def test_run(self): + expected_streams = self.streams_to_test() + + conn_id = connections.ensure_connection(self) + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + stream_schema + ) + + # Run sync 1 + first_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records = runner.get_records_from_target_output() + state_1 = menagerie.get_state(conn_id) + + # Update state to simulate a bookmark + new_state = copy.deepcopy(state_1) + for stream in state_1['bookmarks'].keys(): + if self.expected_replication_method()[stream] == self.INCREMENTAL: + calculated_bookmark_value = self.timedelta_formatted( + state_1['bookmarks']['owners']['updatedAt'], days=-1, str_format=self.BASIC_DATE_FORMAT + ) + new_state['bookmarks'][stream]['updatedAt'] = calculated_bookmark_value + + menagerie.set_state(conn_id, new_state) + + # run second sync + second_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records_2 = runner.get_records_from_target_output() + state_2 = menagerie.get_state(conn_id) + + # Test by Stream + for stream in expected_streams: + + with self.subTest(stream=stream): + + # gather expected values + replication_method = self.expected_replication_method()[stream] + primary_keys = self.expected_primary_keys()[stream] + + # gather replicated records + actual_record_count_2 = second_record_count_by_stream[stream] + actual_records_2 = [message['data'] + for message in synced_records_2[stream]['messages'] + if message['action'] == 'upsert'] + actual_record_count_1 = first_record_count_by_stream[stream] + actual_records_1 = [message['data'] + for message in synced_records[stream]['messages'] + if message['action'] == 'upsert'] + + # NB: There are no replication-key values on records and so we cannot confirm that the records + # replicated respect the bookmark via direct comparison. All we can do is verify syncs correspond + # to the replication methods logically by strategically setting the simulated state. 
+ + if replication_method == self.INCREMENTAL: + + # get saved states + stream_replication_key = list(self.expected_replication_keys()[stream])[0] + bookmark_1 = state_1['bookmarks'][stream][stream_replication_key] + bookmark_2 = state_2['bookmarks'][stream][stream_replication_key] + + # verify the uninterrupted sync and the simulated sync end with the same bookmark values + self.assertEqual(bookmark_1, bookmark_2) + + # trim records down to just the primary key values + sync_1_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_1] + sync_2_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_2] + # ensure no dupe records present + self.assertCountEqual(set(sync_1_pks), sync_1_pks) + self.assertCountEqual(set(sync_2_pks), sync_2_pks) + + # verify sync 2 replicated only records that were also replicated in sync 1, since the simulated state + # does not correspond to a specific record's replication-key value + self.assertTrue(set(sync_2_pks).issubset(set(sync_1_pks))) + + # verify there are more records in sync 1 than in sync 2 (proper setup required for this) + self.assertGreater(actual_record_count_1, actual_record_count_2) + + elif replication_method == self.FULL: + + # verify the same number of records were replicated in each sync + self.assertEqual(actual_record_count_1, actual_record_count_2) + + # verify the exact same records were replicated in each sync + self.assertEqual(actual_records_1, actual_records_2) + + else: + raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") diff --git a/archive/tests/test_hubspot_child_stream_only.py b/archive/tests/test_hubspot_child_stream_only.py new file mode 100644 index 0000000..a1ffc42 --- /dev/null +++ b/archive/tests/test_hubspot_child_stream_only.py @@ -0,0 +1,88 @@ +"""Test tap field selection of a child stream without its parent.""" +import re +from datetime import datetime as dt +from datetime import timedelta + +from tap_tester import connections +from tap_tester import menagerie +from tap_tester import runner + +from base import HubspotBaseTest +from client import TestClient + + +class FieldSelectionChildTest(HubspotBaseTest): + """Test tap field selection of a child stream without its parent.""" + + @staticmethod + def name(): + return "tt_hubspot_child_streams" + + def get_properties(self): + return { + 'start_date' : dt.strftime(dt.today()-timedelta(days=2), self.START_DATE_FORMAT) + } + + def setUp(self): + test_client = TestClient(start_date=self.get_properties()['start_date']) + + contact = test_client.create('contacts') + company = test_client.create('companies')[0] + contact_by_company = test_client.create_contacts_by_company( + company_ids=[company['companyId']], + contact_records=contact + ) + + def test_run(self): + """ + Verify that when a child stream is selected without its parent that + • a critical error in the tap occurs + • the error indicates which parent stream needs to be selected + • when the parent is selected the tap doesn't throw a critical error + """ + streams_to_test = {"contacts_by_company"} + + conn_id = self.create_connection_and_run_check() + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in streams_to_test] + + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + 
stream_schema + ) + + # Run a sync job using orchestrator + sync_job_name = runner.run_sync_mode(self, conn_id) + + # Verify tap and target exit codes + exit_status = menagerie.get_exit_status(conn_id, sync_job_name) + + # Verify that the tap error message shows you need to select the parent stream + self.assertRaises(AssertionError, menagerie.verify_sync_exit_status, self, exit_status, sync_job_name) + self.assertEqual(exit_status['tap_error_message'], + ('Unable to extract contacts_by_company data. ' + 'To receive contacts_by_company data, you also need to select companies.')) + + # Verify there is no discovery or target error + self.assertEqual(exit_status['target_exit_status'], 0) + self.assertEqual(exit_status['discovery_exit_status'], 0) + + # Select only child and required parent and make sure there is no critical error + streams_to_test = {"contacts_by_company", "companies"} + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in streams_to_test] + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + stream_schema + ) + + # Run a sync job + self.run_and_verify_sync(conn_id) diff --git a/archive/tests/test_hubspot_discovery.py b/archive/tests/test_hubspot_discovery.py new file mode 100644 index 0000000..c61ba04 --- /dev/null +++ b/archive/tests/test_hubspot_discovery.py @@ -0,0 +1,131 @@ +"""Test tap discovery mode and metadata/annotated-schema.""" +import re + +from tap_tester import menagerie + +from base import HubspotBaseTest + + +class DiscoveryTest(HubspotBaseTest): + """Test tap discovery mode and metadata/annotated-schema conforms to standards.""" + + @staticmethod + def name(): + return "tt_hubspot_discovery" + + def test_run(self): + """ + Verify that discover creates the appropriate catalog, schema, metadata, etc. + + • Verify number of actual streams discovered match expected + • Verify the stream names discovered were what we expect + • Verify stream names follow naming convention + streams should only have lowercase alphas and underscores + • verify there is only 1 top level breadcrumb + • verify replication key(s) + • verify primary key(s) + • verify that if there is a replication key we are doing INCREMENTAL otherwise FULL + • verify the actual replication matches our expected replication method + • verify that primary, replication and foreign keys + are given the inclusion of automatic (metadata and annotated schema). 
+ • verify that all other fields have inclusion of available (metadata and schema) + """ + streams_to_test = self.expected_streams() + + conn_id = self.create_connection_and_run_check() + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Verify stream names follow naming convention + # streams should only have lowercase alphas and underscores + found_catalog_names = {c['tap_stream_id'] for c in found_catalogs} + self.assertTrue(all([re.fullmatch(r"[a-z_]+", name) for name in found_catalog_names]), + msg="One or more streams don't follow standard naming") + + for stream in streams_to_test: + with self.subTest(stream=stream): + catalog = next(iter([catalog for catalog in found_catalogs + if catalog["stream_name"] == stream])) + assert catalog # based on previous tests this should always be found + schema_and_metadata = menagerie.get_annotated_schema(conn_id, catalog['stream_id']) + metadata = schema_and_metadata["metadata"] + + # verify there is only 1 top level breadcrumb + stream_properties = [item for item in metadata if item.get("breadcrumb") == []] + self.assertTrue(len(stream_properties) == 1, + msg=f"There is NOT only one top level breadcrumb for {stream}" + \ + f"\nstream_properties | {stream_properties}") + + # verify replication key(s) + actual_rep_keys = set(stream_properties[0].get( + "metadata", {self.REPLICATION_KEYS: None}).get( + self.REPLICATION_KEYS, [])) + self.assertEqual( + set(stream_properties[0].get( + "metadata", {self.REPLICATION_KEYS: []}).get(self.REPLICATION_KEYS, [])), + self.expected_replication_keys()[stream], + msg=f"expected replication key {self.expected_replication_keys()[stream]} but actual is {actual_rep_keys}" + ) + + + # verify primary key(s) + actual_primary_keys = set(stream_properties[0].get( "metadata", {self.PRIMARY_KEYS: []}).get(self.PRIMARY_KEYS, [])) + self.assertSetEqual(self.expected_primary_keys()[stream], actual_primary_keys, + msg=f"expected primary key {self.expected_primary_keys()[stream]} but actual is {actual_primary_keys}" + #set(stream_properties[0].get('metadata', {self.PRIMARY_KEYS: None}).get(self.PRIMARY_KEYS, [])))}" + + ) + actual_replication_method = stream_properties[0]['metadata'].get('forced-replication-method') + # BUG https://jira.talendforge.org/browse/TDL-9939 all streams are set to full-table in the metadata + # verify the actual replication matches our expected replication method + if stream == "contacts": + self.assertEqual( + self.expected_replication_method().get(stream, None), + actual_replication_method, + msg="The actual replication method {} doesn't match the expected {}".format( + actual_replication_method, + self.expected_replication_method().get(stream, None))) + + # verify that if there is a replication key we are doing INCREMENTAL otherwise FULL + actual_replication_method = stream_properties[0].get( + "metadata", {self.REPLICATION_METHOD: None}).get(self.REPLICATION_METHOD) + if stream_properties[0].get( + "metadata", {self.REPLICATION_KEYS: []}).get(self.REPLICATION_KEYS, []): + + if stream in ["contacts", "companies", "deals"]: + self.assertTrue(actual_replication_method == self.INCREMENTAL, + msg="Expected INCREMENTAL replication " + "since there is a replication key") + else: + # BUG_TDL-9939 https://jira.talendforge.org/browse/TDL-9939 all streams are set to full table + pass # BUG TDL-9939 REMOVE ME WHEN BUG IS ADDRESSED + + else: + self.assertTrue(actual_replication_method == self.FULL, + msg="Expected FULL replication " + "since there is no replication key") + + 
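For reference, the breadcrumb logic above assumes catalog metadata in the standard Singer shape, where each entry pairs a breadcrumb with a metadata dict, roughly like this (stream and field names illustrative only):

    metadata = [
        {"breadcrumb": [],
         "metadata": {"table-key-properties": ["companyId"],
                      "valid-replication-keys": ["property_hs_lastmodifieddate"],
                      "forced-replication-method": "INCREMENTAL"}},
        {"breadcrumb": ["properties", "companyId"], "metadata": {"inclusion": "automatic"}},
        {"breadcrumb": ["properties", "name"], "metadata": {"inclusion": "available"}},
    ]

The single entry with an empty breadcrumb is the top-level entry counted by the first assertion, and the per-field entries are what the automatic/available checks that follow iterate over.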
expected_primary_keys = self.expected_primary_keys()[stream] + expected_replication_keys = self.expected_replication_keys()[stream] + expected_automatic_fields = expected_primary_keys | expected_replication_keys + + # verify that primary, replication and foreign keys are given the inclusion of automatic in metadata. + # BUG_2 https://jira.talendforge.org/browse/TDL-9772 'inclusion' is not present for replication keys + actual_automatic_fields = {item.get("breadcrumb", ["properties", None])[1] + for item in metadata + if item.get("metadata").get("inclusion") == "automatic"} + if stream in ["contacts", "companies", "deals"]: + self.assertEqual(expected_automatic_fields, + actual_automatic_fields, + msg=f"expected {expected_automatic_fields} automatic fields but got {actual_automatic_fields}" + ) + + # verify that all other fields have inclusion of available + # This assumes there are no unsupported fields for SaaS sources + self.assertTrue( + all({item.get("metadata").get("inclusion") == "available" + for item in metadata + if item.get("breadcrumb", []) != [] + and item.get("breadcrumb", ["properties", None])[1] + not in actual_automatic_fields}), + msg="Not all non key properties are set to available in metadata") diff --git a/archive/tests/test_hubspot_interrupted_sync.py b/archive/tests/test_hubspot_interrupted_sync.py new file mode 100644 index 0000000..61725ab --- /dev/null +++ b/archive/tests/test_hubspot_interrupted_sync.py @@ -0,0 +1,142 @@ +from datetime import datetime, timedelta +from time import sleep +import copy + +import tap_tester.connections as connections +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner + +from base import HubspotBaseTest +from client import TestClient + + +class TestHubspotInterruptedSync1(HubspotBaseTest): + """Testing interrupted syncs for streams that implement unique bookmarking logic.""" + @staticmethod + def name(): + return "tt_hubspot_sync_interrupt_1" + + def streams_to_test(self): + """expected streams minus the streams not under test""" + return {'companies', 'engagements', 'tickets'} + + def simulated_interruption(self, reference_state): + + new_state = copy.deepcopy(reference_state) + + companies_bookmark = self.timedelta_formatted( + reference_state['bookmarks']['companies']['property_hs_lastmodifieddate'], + days=-1, str_format=self.BASIC_DATE_FORMAT + ) + new_state['bookmarks']['companies']['property_hs_lastmodifieddate'] = None + new_state['bookmarks']['companies']['current_sync_start'] = companies_bookmark + + engagements_bookmark = self.timedelta_formatted( + reference_state['bookmarks']['engagements']['lastUpdated'], + days=-1, str_format=self.BASIC_DATE_FORMAT + ) + new_state['bookmarks']['engagements']['lastUpdated'] = None + new_state['bookmarks']['engagements']['current_sync_start'] = engagements_bookmark + + tickets_bookmark = self.timedelta_formatted( + reference_state['bookmarks']['tickets']['updatedAt'], + days=-1, str_format=self.BASIC_DATE_FORMAT) + new_state['bookmarks']['tickets']['updatedAt'] = tickets_bookmark + + return new_state + + def get_properties(self): + # 'start_date' : '2021-08-19T00:00:00Z' + # return {'start_date' : '2017-11-22T00:00:00Z'} + return { + 'start_date' : datetime.strftime( + datetime.today()-timedelta(days=3), self.START_DATE_FORMAT + ), + } + + def setUp(self): + self.maxDiff = None # see all output in failure + + def test_run(self): + + expected_streams = self.streams_to_test() + + conn_id = connections.ensure_connection(self) + + found_catalogs = 
self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + stream_schema + ) + + # Run sync 1 + first_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records = runner.get_records_from_target_output() + state_1 = menagerie.get_state(conn_id) + + # Update state to simulate a bookmark + new_state = self.simulated_interruption(state_1) + menagerie.set_state(conn_id, new_state) + + # run second sync + second_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records_2 = runner.get_records_from_target_output() + state_2 = menagerie.get_state(conn_id) + + # Test by Stream + for stream in expected_streams: + + with self.subTest(stream=stream): + + # gather expected values + replication_method = self.expected_replication_method()[stream] + primary_keys = self.expected_primary_keys()[stream] + + # gather replicated records + actual_record_count_2 = second_record_count_by_stream[stream] + actual_records_2 = [message['data'] + for message in synced_records_2[stream]['messages'] + if message['action'] == 'upsert'] + actual_record_count_1 = first_record_count_by_stream[stream] + actual_records_1 = [message['data'] + for message in synced_records[stream]['messages'] + if message['action'] == 'upsert'] + + # NB: There are no replication-key values on records and so we cannot confirm that the records + # replicated respect the bookmark via direct comparison. All we can do is verify syncs correspond + # to the replication methods logically by strategically setting the simulated state.
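The `timedelta_formatted` helper called by `simulated_interruption` above comes from the shared `HubspotBaseTest` base class, which is outside this hunk; a plausible sketch of what it does, inferred from how it is called here (the default format string is an assumption), is:

    from datetime import datetime, timedelta

    def timedelta_formatted(dtime, days=0, str_format="%Y-%m-%dT%H:%M:%S.%fZ"):
        # parse the bookmark string, shift it by the requested number of days,
        # and serialize it back in the same format
        parsed = datetime.strptime(dtime, str_format)
        return datetime.strftime(parsed + timedelta(days=days), str_format)

Rolling each bookmark back one day this way guarantees the injected state points at a moment the first sync already covered, which is what the assertions below depend on.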
+ + if replication_method == self.INCREMENTAL: + + # get saved states + stream_replication_key = list(self.expected_replication_keys()[stream])[0] + bookmark_1 = state_1['bookmarks'][stream][stream_replication_key] + bookmark_2 = state_2['bookmarks'][stream][stream_replication_key] + + # BUG_TDL-15782 [tap-hubspot] Failure to recover from interrupted sync (engagements, companies) + if stream in {'companies', 'engagements'}: + continue # skip failing assertions + + # verify the uninterrupted sync and the simulated sync end with the same bookmark values + self.assertEqual(bookmark_1, bookmark_2) + + # trim records down to just the primary key values + sync_1_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_1] + sync_2_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_2] + # ensure no dupe records present + self.assertCountEqual(set(sync_1_pks), sync_1_pks) + self.assertCountEqual(set(sync_2_pks), sync_2_pks) + + # verify sync 2 replicated only records that were also replicated in sync 1, since the simulated state + # does not correspond to a specific record's replication-key value + self.assertTrue(set(sync_2_pks).issubset(set(sync_1_pks))) + + else: + raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") + diff --git a/archive/tests/test_hubspot_interrupted_sync_offset.py b/archive/tests/test_hubspot_interrupted_sync_offset.py new file mode 100644 index 0000000..891362b --- /dev/null +++ b/archive/tests/test_hubspot_interrupted_sync_offset.py @@ -0,0 +1,141 @@ +from datetime import datetime, timedelta +from time import sleep +import copy + +import tap_tester.connections as connections +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner + +from base import HubspotBaseTest +from client import TestClient + + +class TestHubspotInterruptedSyncOffsetContactLists(HubspotBaseTest): + """Testing interrupted syncs for streams that implement unique bookmarking logic.""" + @staticmethod + def name(): + return "tt_hubspot_interrupt_contact_lists" + + def streams_to_test(self): + """expected streams minus the streams not under test""" + untested = { + # Streams tested elsewhere + 'companies', # covered in TestHubspotInterruptedSync1 + 'engagements', # covered in TestHubspotInterruptedSync1 + # Feature Request | TDL-16095: [tap-hubspot] All incremental + # streams should implement the interruptible sync feature + 'forms', # TDL-16095 + 'owners', # TDL-16095 + 'workflows', # TDL-16095 + # Streams that do not apply + 'deal_pipelines', # interruptible does not apply, child of deals + 'campaigns', # unable to manually find a partial state with our test data + 'email_events', # unable to manually find a partial state with our test data + 'contacts_by_company', # interruptible does not apply, child of 'companies' + 'subscription_changes', # BUG_TDL-14938 + 'tickets' # covered in TestHubspotInterruptedSync1 + } + + return self.expected_streams() - untested + + def stream_to_interrupt(self): + return 'contact_lists' + + def state_to_inject(self): + return {'offset': {'offset': 250}} + + def get_properties(self): + return { + 'start_date' : datetime.strftime( + datetime.today()-timedelta(days=3), self.START_DATE_FORMAT + ), + } + + def setUp(self): + self.maxDiff = None # see all output in failure + + def test_run(self): + + # BUG TDL-16094 [tap-hubspot] `contacts` stream fails to recover from sync interruption + if self.stream_to_interrupt() == 'contacts': + self.skipTest("Skipping contacts TEST! 
See BUG[TDL-16094]") + + + expected_streams = self.streams_to_test() + + conn_id = connections.ensure_connection(self) + + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + stream_schema + ) + + # Run sync 1 + first_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records = runner.get_records_from_target_output() + state_1 = menagerie.get_state(conn_id) + + # Update state to simulate a bookmark + stream = self.stream_to_interrupt() + new_state = copy.deepcopy(state_1) + new_state['bookmarks'][stream] = self.state_to_inject() + new_state['currently_syncing'] = stream + menagerie.set_state(conn_id, new_state) + + # run second sync + second_record_count_by_stream = self.run_and_verify_sync(conn_id) + synced_records_2 = runner.get_records_from_target_output() + state_2 = menagerie.get_state(conn_id) + + # Verify the post-interrupt sync bookmark is greater than or equal to the interrupted sync bookmark, + # since newly created test records may get updated while the stream is syncing + replication_keys = self.expected_replication_keys() + for stream in state_1.get('bookmarks'): + replication_key = list(replication_keys[stream])[0] + self.assertLessEqual(state_1["bookmarks"][stream].get(replication_key), + state_2["bookmarks"][stream].get(replication_key), + msg="First sync bookmark should not be greater than the second bookmark.") + + +class TestHubspotInterruptedSyncOffsetContacts(TestHubspotInterruptedSyncOffsetContactLists): + """Testing interrupted syncs for streams that implement unique bookmarking logic.""" + @staticmethod + def name(): + return "tt_hubspot_interrupt_contacts" + + def get_properties(self): + return { + 'start_date' : '2021-10-01T00:00:00Z' + } + + + def stream_to_interrupt(self): + return 'contacts' + + def state_to_inject(self): + return {'offset': {'vidOffset': 3502}} + +class TestHubspotInterruptedSyncOffsetDeals(TestHubspotInterruptedSyncOffsetContactLists): + """Testing interrupted syncs for streams that implement unique bookmarking logic.""" + @staticmethod + def name(): + return "tt_hubspot_interrupt_deals" + + def get_properties(self): + return { + 'start_date' : '2021-10-10T00:00:00Z' + } + + def stream_to_interrupt(self): + return 'deals' + + def state_to_inject(self): + return {'property_hs_lastmodifieddate': '2021-10-13T08:32:08.383000Z', + 'offset': {'offset': 3442973342}} diff --git a/archive/tests/test_hubspot_pagination.py b/archive/tests/test_hubspot_pagination.py new file mode 100644 index 0000000..d9a2faa --- /dev/null +++ b/archive/tests/test_hubspot_pagination.py @@ -0,0 +1,140 @@ +from datetime import datetime +from datetime import timedelta +import time + +import tap_tester.connections as connections +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner +from tap_tester.logger import LOGGER + +from client import TestClient +from base import HubspotBaseTest + + +class TestHubspotPagination(HubspotBaseTest): + + @staticmethod + def name(): + return "tt_hubspot_pagination" + + def get_properties(self): + return { + 'start_date' : datetime.strftime(datetime.today()-timedelta(days=7), self.START_DATE_FORMAT) + } + + def setUp(self): + self.maxDiff = None # see all
output in failure + + # initialize the test client + setup_start = time.perf_counter() + test_client = TestClient(self.get_properties()['start_date']) + + # gather expectations + existing_records = dict() + limits = self.expected_page_limits() + streams = self.streams_to_test() + + # order the creation of test data for streams based on the streams under test + # this is necessary for child streams and streams that share underlying data in hubspot + if 'subscription_changes' in streams and 'email_events' in streams: + streams.remove('email_events') # we get this for free with subscription_changes + stream_to_run_last = 'contacts_by_company' # child stream depends on companyIds, must go last + if stream_to_run_last in streams: + streams.remove(stream_to_run_last) + streams = list(streams) + streams.append(stream_to_run_last) + + # generate test data if necessary, one stream at a time + for stream in streams: + + # Get all records + if stream == 'contacts_by_company': + company_ids = [company['companyId'] for company in existing_records['companies']] + existing_records[stream] = test_client.read(stream, parent_ids=company_ids) + elif stream in {'companies', 'contact_lists', 'subscription_changes', 'engagements', 'email_events'}: + existing_records[stream] = test_client.read(stream) + else: + existing_records[stream] = test_client.read(stream) + + # check if we exceed the pagination limit + LOGGER.info(f"Pagination limit set to - {limits[stream]} and total number of existing records - {len(existing_records[stream])}") + under_target = limits[stream] + 1 - len(existing_records[stream]) + LOGGER.info(f'under_target = {under_target} for {stream}') + + # if we do not exceed the limit generate more data so that we do + if under_target > 0: + LOGGER.info(f"need to make {under_target} records for {stream} stream") + if stream in {'subscription_changes', 'email_events'}: + test_client.create(stream, subscriptions=existing_records[stream], times=under_target) + elif stream == 'contacts_by_company': + test_client.create(stream, company_ids, times=under_target) + else: + for i in range(under_target): + # create records to exceed limit + test_client.create(stream) + + setup_end = time.perf_counter() + LOGGER.info(f"Test Client took about {str(setup_end-setup_start).split('.')[0]} seconds") + + def streams_to_test(self): + """ + All streams with limits are under test + """ + streams_with_page_limits = { + stream + for stream, limit in self.expected_page_limits().items() + if limit + } + streams_to_test = streams_with_page_limits.difference({ + # updates for contacts_by_company do not get processed quickly or consistently + # via Hubspot API, unable to guarantee page limit is exceeded + 'contacts_by_company', + 'email_events', + 'subscription_changes', # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 + }) + + return streams_to_test + + def test_run(self): + # Select only the expected streams tables + expected_streams = self.streams_to_test() + conn_id = connections.ensure_connection(self) + found_catalogs = self.run_and_verify_check_mode(conn_id) + + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + for catalog_entry in catalog_entries: + stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) + connections.select_catalog_and_fields_via_metadata( + conn_id, + catalog_entry, + stream_schema + ) + + sync_record_count = self.run_and_verify_sync(conn_id) + sync_records = runner.get_records_from_target_output() + + + # Test by
stream + for stream in expected_streams: + with self.subTest(stream=stream): + + record_count = sync_record_count.get(stream, 0) + + sync_messages = sync_records.get(stream, {'messages': []}).get('messages') + + primary_keys = self.expected_primary_keys().get(stream) + + # Verify the sync record count exceeds the stream's page size + stream_page_size = self.expected_page_limits()[stream] + self.assertLess(stream_page_size, record_count) + + # Verify we did not duplicate any records across pages + records_pks_set = {tuple([message.get('data').get(primary_key) + for primary_key in primary_keys]) + for message in sync_messages} + records_pks_list = [tuple([message.get('data').get(primary_key) + for primary_key in primary_keys]) + for message in sync_messages] + # records_pks_list = [message.get('data').get(primary_key) for message in sync_messages] + self.assertCountEqual(records_pks_set, records_pks_list, + msg=f"We have duplicate records for {stream}") diff --git a/archive/tests/test_hubspot_start_date.py b/archive/tests/test_hubspot_start_date.py new file mode 100644 index 0000000..df2ac64 --- /dev/null +++ b/archive/tests/test_hubspot_start_date.py @@ -0,0 +1,179 @@ +import datetime + +import tap_tester.connections as connections +import tap_tester.menagerie as menagerie +import tap_tester.runner as runner +from tap_tester import LOGGER + +from base import HubspotBaseTest +from client import TestClient + + +STATIC_DATA_STREAMS = {'owners', 'campaigns'} + +class TestHubspotStartDate(HubspotBaseTest): + + @staticmethod + def name(): + return "tt_hubspot_start_date" + + def setUp(self): + """ + Create 1 record for every stream under test, because we must guarantee that + over time there will always be more records in the sync 1 time bin + (of start_date_1 -> now) than there are in the sync 2 time bin (of start_date_2 -> now). + """ + + LOGGER.info("running streams with creates") + streams_under_test = self.expected_streams() - {'email_events'} # we get this for free with subscription_changes + self.my_start_date = self.get_properties()['start_date'] + self.test_client = TestClient(self.my_start_date) + for stream in streams_under_test: + if stream == 'contacts_by_company': + companies_records = self.test_client.read('companies', since=self.my_start_date) + company_ids = [company['companyId'] for company in companies_records] + self.test_client.create(stream, company_ids) + else: + self.test_client.create(stream) + + def expected_streams(self): + """ + If any streams cannot have data generated programmatically, + hardcode start_dates for these streams and run the test twice. + streams tested in TestHubspotStartDateStatic should be removed.
+ """ + return self.expected_check_streams().difference({ + 'owners', # static test data, covered in separate test + 'campaigns', # static test data, covered in separate test + }) + + + def get_properties(self, original=True): + utc_today = datetime.datetime.strftime( + datetime.datetime.utcnow(), self.START_DATE_FORMAT + ) + + if original: + return { + 'start_date' : self.timedelta_formatted(utc_today, days=-2) + } + else: + return { + 'start_date': utc_today + } + + def test_run(self): + + # SYNC 1 + conn_id = connections.ensure_connection(self) + found_catalogs = self.run_and_verify_check_mode(conn_id) + + # Select only the expected streams tables + expected_streams = self.expected_streams() + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + self.select_all_streams_and_fields(conn_id, catalog_entries) + + first_record_count_by_stream = self.run_and_verify_sync(conn_id) + first_sync_records = runner.get_records_from_target_output() + + # SYNC 2 + conn_id = connections.ensure_connection(self, original_properties=False) + found_catalogs = self.run_and_verify_check_mode(conn_id) + catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] + self.select_all_streams_and_fields(conn_id, catalog_entries) + second_record_count_by_stream = self.run_and_verify_sync(conn_id) + second_sync_records = runner.get_records_from_target_output() + + # Test by stream + for stream in self.expected_streams(): + with self.subTest(stream=stream): + + # gather expectations + start_date_1 = self.get_properties()['start_date'] + start_date_2 = self.get_properties(original=False)['start_date'] + primary_keys = self.expected_primary_keys()[stream] + replication_key = list(self.expected_replication_keys()[stream]) + + # gather results + first_sync_count = first_record_count_by_stream.get(stream, 0) + second_sync_count = second_record_count_by_stream.get(stream, 0) + first_sync_messages = first_sync_records.get(stream, {'messages': []}).get('messages') + second_sync_messages = second_sync_records.get(stream, {'messages': []}).get('messages') + first_sync_primary_keys = set(tuple([record['data'][pk] for pk in primary_keys]) + for record in first_sync_messages) + second_sync_primary_keys = set(tuple([record['data'][pk] for pk in primary_keys]) + for record in second_sync_messages) + + if self.expected_metadata()[stream][self.OBEYS_START_DATE]: + + # Verify sync 2 overlaps with sync 1 + self.assertFalse(first_sync_primary_keys.isdisjoint(second_sync_primary_keys), + msg='There should be a shared set of data from start date 2 through sync execution time.') + + # Verify the second sync has less data + self.assertGreater(first_sync_count, second_sync_count) + + # for incrmental streams we can compare records agains the start date + if replication_key and stream not in {'contacts', 'subscription_changes', 'email_events'}: # BUG_TDL-9939 + + # BUG_TDL-9939 replication key is not listed correctly + if stream in {"campaigns", "companies", "contacts_by_company", "deal_pipelines", "deals"}: + # For deals stream, the replication key is already prefixed with 'property_'. 
+ replication_key = [replication_key[0]] if stream in ["deals", "companies"] else [f'property_{replication_key[0]}'] + first_sync_replication_key_values = [record['data'][replication_key[0]]['value'] + for record in first_sync_messages] + second_sync_replication_key_values = [record['data'][replication_key[0]]['value'] + for record in second_sync_messages] + else: + first_sync_replication_key_values = [record['data'][replication_key[0]] for record in first_sync_messages] + second_sync_replication_key_values = [record['data'][replication_key[0]] for record in second_sync_messages] + formatted_start_date_1 = start_date_1.replace('Z', '.000000Z') + formatted_start_date_2 = start_date_2.replace('Z', '.000000Z') + + # Verify the replication key values are greater than or equal to the start date + # for sync 1 + for value in first_sync_replication_key_values: + self.assertGreaterEqual(value, formatted_start_date_1) + # and for sync 2 + for value in second_sync_replication_key_values: + self.assertGreaterEqual(value, formatted_start_date_2) + else: + + # If Start date is not obeyed then verify the syncs are equal + self.assertEqual(first_sync_count, second_sync_count) + self.assertEqual(first_sync_primary_keys, second_sync_primary_keys) + + # Verify records are replicated for both syncs + self.assertGreater(first_sync_count, 0, + msg='start date usage is not confirmed when no records are replicated') + self.assertGreater(second_sync_count, 0, + msg='start date usage is not confirmed when no records are replicated') + + +class TestHubspotStartDateStatic(TestHubspotStartDate): + @staticmethod + def name(): + return "tt_hubspot_start_date_static" + + def expected_streams(self): + """expected streams minus the streams not under test""" + return { + 'owners', + 'campaigns', + } + + def get_properties(self, original=True): + utc_today = datetime.datetime.strftime( + datetime.datetime.utcnow(), self.START_DATE_FORMAT + ) + + if original: + return {'start_date' : '2017-11-22T00:00:00Z'} + + else: + return { + 'start_date' : '2022-02-25T00:00:00Z' + } + + def setUp(self): + LOGGER.info("running streams with no creates") diff --git a/archive/tests/unittests/test_deals.py b/archive/tests/unittests/test_deals.py new file mode 100644 index 0000000..46b97fe --- /dev/null +++ b/archive/tests/unittests/test_deals.py @@ -0,0 +1,101 @@ +""" +Unit tests for the functions needed to run `sync_deals` +""" +import os +import unittest +from tap_hubspot import acquire_access_token_from_refresh_token +from tap_hubspot import CONFIG +from tap_hubspot import gen_request +from tap_hubspot import get_url +from tap_hubspot import merge_responses +from tap_hubspot import process_v3_deals_records + + +class TestDeals(unittest.TestCase): + """ + This class gets an access token for the tests to use and then tests + assumptions we have about the tap + """ + def setUp(self): + """ + This function reads in the variables needed to get an access token + """ + CONFIG['redirect_uri'] = os.environ['HUBSPOT_REDIRECT_URI'] + CONFIG['refresh_token'] = os.environ['HUBSPOT_REFRESH_TOKEN'] + CONFIG['client_id'] = os.environ['HUBSPOT_CLIENT_ID'] + CONFIG['client_secret'] = os.environ['HUBSPOT_CLIENT_SECRET'] + + acquire_access_token_from_refresh_token() + + + def test_can_fetch_hs_date_entered_props(self): + """ + This test is written on the assumption that `sync_deals()` calls + `gen_request()` to get records + """ + state = {} + url = get_url('deals_all') + params = {'count': 250, + 'includeAssociations': False, + 'properties' : []} + v3_fields = 
['hs_date_entered_appointmentscheduled'] + + records = list( + gen_request(state, 'deals', url, params, 'deals', "hasMore", ["offset"], ["offset"], v3_fields=v3_fields) + ) + + for record in records: + # The test account has a deal stage called "appointment scheduled" + value = record.get('properties',{}).get('hs_date_entered_appointmentscheduled') + error_msg = ('Could not find "hs_date_entered_appointmentscheduled" ' + 'in {}').format(record) + self.assertIsNotNone(value, msg=error_msg) + + def test_process_v3_deals_records(self): + self.maxDiff = None + data = [ + {'properties': {'field1': 'value1', + 'field2': 'value2', + 'hs_date_entered_field3': 'value3', + 'hs_date_exited_field4': 'value4',}}, + ] + + expected = [ + {'properties': {'hs_date_entered_field3': {'value': 'value3'}, + 'hs_date_exited_field4': {'value': 'value4'},}}, + ] + + actual = process_v3_deals_records(data) + + self.assertDictEqual(expected[0]['properties'], actual[0]['properties']) + + def test_merge_responses(self): + v1_resp = [ + {'dealId': '1', + 'properties': {'field1': 'value1',}}, + {'dealId': '2', + 'properties': {'field3': 'value3',}}, + ] + + v3_resp = [ + {'id': '1', + 'properties': {'field2': 'value2',}}, + {'id': '2', + 'properties': {'field4': 'value4',}}, + ] + + expected = [ + {'dealId': '1', + 'properties': {'field1': 'value1', + 'field2': 'value2',}}, + {'dealId': '2', + 'properties': {'field3': 'value3', + 'field4': 'value4',}}, + ] + + merge_responses(v1_resp, v3_resp) + + for expected_record in expected: + for actual_record in v1_resp: + if actual_record['dealId'] == expected_record['dealId']: + self.assertDictEqual(expected_record, actual_record) From 992e911399010fdcf24859abb11516e4957a35f2 Mon Sep 17 00:00:00 2001 From: Ethan Stein Date: Thu, 4 May 2023 14:08:37 -0700 Subject: [PATCH 002/105] Adding .DS_Store to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index d9005f2..f3dbb0a 100644 --- a/.gitignore +++ b/.gitignore @@ -150,3 +150,4 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
#.idea/ +.DS_Store From dd035124601e1014c73def05b48a63d4ea1bc96d Mon Sep 17 00:00:00 2001 From: edward Date: Thu, 18 May 2023 13:40:02 -0400 Subject: [PATCH 003/105] remove password param, using OAuth --- meltano.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/meltano.yml b/meltano.yml index 4d60c4c..1abe739 100644 --- a/meltano.yml +++ b/meltano.yml @@ -20,8 +20,6 @@ plugins: settings: # TODO: To configure using Meltano, declare settings and their types here: - name: username - - name: password - kind: password - name: start_date value: '2010-01-01T00:00:00Z' loaders: From 1aa767b78245ea7176529a85f9b190ed49305f7b Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 18 May 2023 13:41:42 -0400 Subject: [PATCH 004/105] add .idea to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f3dbb0a..7d2cc21 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ share/python-wheels/ *.egg-info/ .installed.cfg *.egg +.idea MANIFEST # PyInstaller From c5bceefd5721a66dea85bcbe6933c2871fbdb233 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 18 May 2023 13:43:15 -0400 Subject: [PATCH 005/105] edit stream initial template --- tap_hubspot_sdk/streams.py | 52 ++++++-------------------------------- 1 file changed, 8 insertions(+), 44 deletions(-) diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index 8cd9f6a..0c387a9 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -8,11 +8,14 @@ from tap_hubspot_sdk.client import tap-hubspot-sdkStream -# TODO: Delete this is if not using json files for schema definition -SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") -# TODO: - Override `UsersStream` and `GroupsStream` with your own stream definition. # - Copy-paste as many times as needed to create multiple stream types.
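The typing-helper aliases added in the hunk below (`PropertiesList`, `Property`, `StringType`, and so on) are presumably intended to shorten the schema declarations that later commits build on top of this template; a declaration written with them would look roughly like this (field names illustrative only, not part of the patch):

    schema = PropertiesList(
        Property("id", StringType, description="Record ID"),
        Property("createdAt", DateTimeType),
        Property("archived", BooleanType),
    ).to_dict()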
-
+PropertiesList = th.PropertiesList
+Property = th.Property
+ObjectType = th.ObjectType
+DateTimeType = th.DateTimeType
+StringType = th.StringType
+ArrayType = th.ArrayType
+BooleanType = th.BooleanType
+IntegerType = th.IntegerType

 class UsersStream(tap-hubspot-sdkStream):
     """Define custom stream."""
@@ -23,43 +26,4 @@ class UsersStream(tap-hubspot-sdkStream):
     replication_key = None
     # Optionally, you may also use `schema_filepath` in place of `schema`:
     # schema_filepath = SCHEMAS_DIR / "users.json"  # noqa: ERA001
-    schema = th.PropertiesList(
-        th.Property("name", th.StringType),
-        th.Property(
-            "id",
-            th.StringType,
-            description="The user's system ID",
-        ),
-        th.Property(
-            "age",
-            th.IntegerType,
-            description="The user's age in years",
-        ),
-        th.Property(
-            "email",
-            th.StringType,
-            description="The user's email address",
-        ),
-        th.Property("street", th.StringType),
-        th.Property("city", th.StringType),
-        th.Property(
-            "state",
-            th.StringType,
-            description="State name in ISO 3166-2 format",
-        ),
-        th.Property("zip", th.StringType),
-    ).to_dict()
-
-class GroupsStream(tap-hubspot-sdkStream):
-    """Define custom stream."""
-
-    name = "groups"
-    path = "/groups"
-    primary_keys = ["id"]
-    replication_key = "modified"
-    schema = th.PropertiesList(
-        th.Property("name", th.StringType),
-        th.Property("id", th.StringType),
-        th.Property("modified", th.DateTimeType),
-    ).to_dict()

From 07ef7e11dc26a885b54b0eddbc6bf4fd656c4df7 Mon Sep 17 00:00:00 2001
From: NeilGorman104
Date: Thu, 18 May 2023 14:28:02 -0400
Subject: [PATCH 006/105] Change Hubspot class / stream names

---
 pyproject.toml             |  2 +-
 tap_hubspot_sdk/client.py  |  4 +--
 tap_hubspot_sdk/streams.py | 50 +++++++++++++++++++++++++++++++-------
 tap_hubspot_sdk/tap.py     |  6 ++---
 4 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 4e11a36..1287a7c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,4 +53,4 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry.scripts]
 # CLI declaration
-tap-hubspot-sdk = 'tap_hubspot_sdk.tap:Taptap-hubspot-sdk.cli'
+tap-hubspot-sdk = 'tap_hubspot_sdk.tap:TapHubspot.cli'
diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py
index 453bc62..21bd3fa 100644
--- a/tap_hubspot_sdk/client.py
+++ b/tap_hubspot_sdk/client.py
@@ -1,4 +1,4 @@
-"""REST client handling, including tap-hubspot-sdkStream base class."""
+"""REST client handling, including HubspotStream base class."""

 from __future__ import annotations

@@ -22,7 +22,7 @@
 SCHEMAS_DIR = Path(__file__).parent / Path("./schemas")


-class tap-hubspot-sdkStream(RESTStream):
+class HubspotStream(RESTStream):
     """tap-hubspot-sdk stream class."""

     @property
diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py
index 0c387a9..07625b9 100644
--- a/tap_hubspot_sdk/streams.py
+++ b/tap_hubspot_sdk/streams.py
@@ -6,7 +6,7 @@

 from singer_sdk import typing as th  # JSON Schema typing helpers

-from tap_hubspot_sdk.client import tap-hubspot-sdkStream
+from tap_hubspot_sdk.client import HubspotStream

 PropertiesList = th.PropertiesList
 Property = th.Property
@@ -17,13 +17,45 @@
 BooleanType = th.BooleanType
 IntegerType = th.IntegerType

-class UsersStream(tap-hubspot-sdkStream):
-    """Define custom stream."""
-    name = "users"
-    path = "/users"
-    primary_keys = ["id"]
-    replication_key = None
-    # Optionally, you may also use `schema_filepath` in place of `schema`:
-    # schema_filepath = SCHEMAS_DIR / "users.json"  # noqa: ERA001

+class AccountStream(HubspotStream):
+    columns = """
+
+    """
+
+    name =
"account" + path = "/query?q=SELECT+{}+from+Account".format(columns) + primary_keys = ["Id"] + replication_key = "LastModifiedDate" + replication_method = "incremental" + + schema = PropertiesList( + Property("Id", StringType), + + + ).to_dict() + + def get_url_params( + self, + context: dict | None, # noqa: ARG002 + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py index 4381b0a..14597fd 100644 --- a/tap_hubspot_sdk/tap.py +++ b/tap_hubspot_sdk/tap.py @@ -9,7 +9,7 @@ from tap_hubspot_sdk import streams -class Taptap-hubspot-sdk(Tap): +class TapHubspot(Tap): """tap-hubspot-sdk tap class.""" name = "tap-hubspot-sdk" @@ -42,7 +42,7 @@ class Taptap-hubspot-sdk(Tap): ), ).to_dict() - def discover_streams(self) -> list[streams.tap-hubspot-sdkStream]: + def discover_streams(self) -> list[streams.HubspotStream]: """Return a list of discovered streams. Returns: @@ -55,4 +55,4 @@ def discover_streams(self) -> list[streams.tap-hubspot-sdkStream]: if __name__ == "__main__": - Taptap-hubspot-sdk.cli() + TapHubspot.cli() From 5070e123be3e8580ff7ee2f75d1ef2da143beb47 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 18 May 2023 15:35:37 -0400 Subject: [PATCH 007/105] Auth naming convention --- tap_hubspot_sdk/auth.py | 4 ++-- tap_hubspot_sdk/client.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tap_hubspot_sdk/auth.py b/tap_hubspot_sdk/auth.py index f1a19ba..63d6410 100644 --- a/tap_hubspot_sdk/auth.py +++ b/tap_hubspot_sdk/auth.py @@ -7,7 +7,7 @@ # The SingletonMeta metaclass makes your streams reuse the same authenticator instance. # If this behaviour interferes with your use-case, you can remove the metaclass. -class tap-hubspot-sdkAuthenticator(OAuthAuthenticator, metaclass=SingletonMeta): +class tapHubspotAuthenticator(OAuthAuthenticator, metaclass=SingletonMeta): """Authenticator class for tap-hubspot-sdk.""" @property @@ -28,7 +28,7 @@ def oauth_request_body(self) -> dict: } @classmethod - def create_for_stream(cls, stream) -> tap-hubspot-sdkAuthenticator: # noqa: ANN001 + def create_for_stream(cls, stream) -> tapHubspotAuthenticator: # noqa: ANN001 """Instantiate an authenticator for a specific Singer stream. 
Args: diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py index 21bd3fa..261668f 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot_sdk/client.py @@ -11,7 +11,7 @@ from singer_sdk.pagination import BaseAPIPaginator # noqa: TCH002 from singer_sdk.streams import RESTStream -from tap_hubspot_sdk.auth import tap-hubspot-sdkAuthenticator +from tap_hubspot_sdk.auth import tapHubspotAuthenticator if sys.version_info >= (3, 8): from functools import cached_property @@ -19,7 +19,7 @@ from cached_property import cached_property _Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] -SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") +#SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") class HubspotStream(RESTStream): @@ -27,9 +27,9 @@ class HubspotStream(RESTStream): @property def url_base(self) -> str: - """Return the API URL root, configurable via tap settings.""" - # TODO: hardcode a value here, or retrieve it from self.config - return "https://api.mysample.com" + #version = self.config.get("api_version", "") + base_url = "https://api.hubapi.com/contacts/v1"#.format(version) + return base_url records_jsonpath = "$[*]" # Or override `parse_response`. @@ -43,7 +43,7 @@ def authenticator(self) -> _Auth: Returns: An authenticator instance. """ - return tap-hubspot-sdkAuthenticator.create_for_stream(self) + return tapHubspotAuthenticator.create_for_stream(self) @property def http_headers(self) -> dict: From c7bf30aac4efd23d539d750141e79d433a4a7b98 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 18 May 2023 15:35:55 -0400 Subject: [PATCH 008/105] List Stream --- tap_hubspot_sdk/streams.py | 19 +++++++++++-------- tap_hubspot_sdk/tap.py | 15 +-------------- 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index 07625b9..eb7e633 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -18,19 +18,22 @@ IntegerType = th.IntegerType -class AccountStream(HubspotStream): +class ListsStream(HubspotStream): columns = """ - + offset, total, lists, has-more """ - name = "account" - path = "/query?q=SELECT+{}+from+Account".format(columns) - primary_keys = ["Id"] - replication_key = "LastModifiedDate" - replication_method = "incremental" + name = "lists" + path = "/lists" + primary_keys = ["lists"] + #replication_key = "LastModifiedDate" + #replication_method = "incremental" schema = PropertiesList( - Property("Id", StringType), + Property("offset", StringType), + Property("total", StringType), + Property("lists", StringType), + Property("has-more", StringType), ).to_dict() diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py index 14597fd..f404510 100644 --- a/tap_hubspot_sdk/tap.py +++ b/tap_hubspot_sdk/tap.py @@ -23,23 +23,11 @@ class TapHubspot(Tap): secret=True, # Flag config as protected. description="The token to authenticate against the API service", ), - th.Property( - "project_ids", - th.ArrayType(th.StringType), - required=True, - description="Project IDs to replicate", - ), th.Property( "start_date", th.DateTimeType, description="The earliest record date to sync", ), - th.Property( - "api_url", - th.StringType, - default="https://api.mysample.com", - description="The url for the API service", - ), ).to_dict() def discover_streams(self) -> list[streams.HubspotStream]: @@ -49,8 +37,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: A list of discovered streams. 
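The hardcoded url_base above is a placeholder; the patches that follow override url_base per stream and read the API version from config instead. A sketch of the per-stream shape the series converges on (patch 014 lands exactly this for the contacts endpoint):

    class ListsStream(HubspotStream):
        @property
        def url_base(self) -> str:
            # e.g. api_version_1 = "v1", supplied via meltano.yml / tap config.
            version = self.config.get("api_version_1", "")
            return "https://api.hubapi.com/contacts/{}".format(version)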
""" return [ - streams.GroupsStream(self), - streams.UsersStream(self), + streams.ListsStream(self), ] From cda6e0acd02e5405ec9b45b89cb166ae36559128 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 18 May 2023 16:02:05 -0400 Subject: [PATCH 009/105] add .env.template --- .env.template | 1 + 1 file changed, 1 insertion(+) create mode 100644 .env.template diff --git a/.env.template b/.env.template new file mode 100644 index 0000000..9576942 --- /dev/null +++ b/.env.template @@ -0,0 +1 @@ +TAP_HUBSPOT_ACCESS_TOKEN = '' \ No newline at end of file From 5626860b1dc09bf2e2c399ce8644f43b3556ed4c Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 18 May 2023 16:10:08 -0400 Subject: [PATCH 010/105] add authenticator function --- meltano.yml | 3 +-- tap_hubspot_sdk/client.py | 10 +++++++++- tap_hubspot_sdk/tap.py | 3 +-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/meltano.yml b/meltano.yml index 1abe739..85dbc89 100644 --- a/meltano.yml +++ b/meltano.yml @@ -18,8 +18,7 @@ plugins: config: start_date: '2010-01-01T00:00:00Z' settings: - # TODO: To configure using Meltano, declare settings and their types here: - - name: username + - name: access_token - name: start_date value: '2010-01-01T00:00:00Z' loaders: diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py index 261668f..c1ad7b9 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot_sdk/client.py @@ -2,6 +2,8 @@ from __future__ import annotations +import json + import sys from pathlib import Path from typing import Any, Callable, Iterable @@ -43,7 +45,13 @@ def authenticator(self) -> _Auth: Returns: An authenticator instance. """ - return tapHubspotAuthenticator.create_for_stream(self) + + url = "https://api.hubapi.com/contacts/v1" + login_api = requests.post(url).text + access_token = json.loads(login_api).get("access_token") + + return BearerTokenAuthenticator.create_for_stream(self, + token=access_token, ) @property def http_headers(self) -> dict: diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py index f404510..3f9c3a9 100644 --- a/tap_hubspot_sdk/tap.py +++ b/tap_hubspot_sdk/tap.py @@ -17,10 +17,9 @@ class TapHubspot(Tap): # TODO: Update this section with the actual config values you expect: config_jsonschema = th.PropertiesList( th.Property( - "auth_token", + "access_token", th.StringType, required=True, - secret=True, # Flag config as protected. 
description="The token to authenticate against the API service", ), th.Property( From 64963a6294f887284010208f2b624e10df874e33 Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 23 May 2023 16:59:52 +0530 Subject: [PATCH 011/105] updated meltano.yml --- meltano.yml | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/meltano.yml b/meltano.yml index 85dbc89..975a147 100644 --- a/meltano.yml +++ b/meltano.yml @@ -1,9 +1,7 @@ version: 1 send_anonymous_usage_stats: true project_id: "tap-hubspot-sdk" -default_environment: test -environments: -- name: test +default_environment: dev plugins: extractors: - name: "tap-hubspot-sdk" @@ -15,13 +13,43 @@ plugins: - discover - about - stream-maps - config: - start_date: '2010-01-01T00:00:00Z' settings: - name: access_token + value: ${TAP_HUBSPOT_ACCESS_TOKEN} + kind: password - name: start_date - value: '2010-01-01T00:00:00Z' + value: '2023-01-01T00:00:00Z' + - name: end_date + value: '2023-05-22T00:00:00Z' + - name: api_version_1 + value: 'v1' + - name: api_version_2 + value: 'v2' + - name: api_version_3 + value: 'v3' loaders: - name: target-jsonl variant: andyh1203 pip_url: target-jsonl + - name: target-csv + variant: hotgluexyz + pip_url: git+https://github.com/hotgluexyz/target-csv.git@0.3.3 + config: + destination_path: /Users/neilgorman/Documents/GitHub/tap-linkedin-sdk/output + - name: target-snowflake + variant: transferwise + pip_url: pipelinewise-target-snowflake + config: + batch_size: 5 +environments: +- name: dev + config: + plugins: + loaders: + - name: target-csv + - name: target-snowflake + config: + primary_key_required: false + add_metadata_columns: true +- name: staging +- name: prod From d806aefaa6bf26420a5456270075c9a9f348bb3d Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 23 May 2023 17:01:18 +0530 Subject: [PATCH 012/105] added user stream --- tap_hubspot_sdk/tap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py index 3f9c3a9..11dadf5 100644 --- a/tap_hubspot_sdk/tap.py +++ b/tap_hubspot_sdk/tap.py @@ -19,7 +19,6 @@ class TapHubspot(Tap): th.Property( "access_token", th.StringType, - required=True, description="The token to authenticate against the API service", ), th.Property( @@ -37,6 +36,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: """ return [ streams.ListsStream(self), + streams.UsersStream(self) ] From d24abca7bb3543682e048279fbbdf0bea2a55e50 Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 23 May 2023 17:01:51 +0530 Subject: [PATCH 013/105] updated authentication --- tap_hubspot_sdk/client.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py index c1ad7b9..f1138f1 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot_sdk/client.py @@ -20,6 +20,8 @@ else: from cached_property import cached_property +from singer_sdk.authenticators import BearerTokenAuthenticator + _Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] #SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") @@ -29,8 +31,7 @@ class HubspotStream(RESTStream): @property def url_base(self) -> str: - #version = self.config.get("api_version", "") - base_url = "https://api.hubapi.com/contacts/v1"#.format(version) + base_url = "https://api.hubapi.com/" return base_url records_jsonpath = "$[*]" # Or override `parse_response`. @@ -46,9 +47,10 @@ def authenticator(self) -> _Auth: An authenticator instance. 
""" - url = "https://api.hubapi.com/contacts/v1" - login_api = requests.post(url).text - access_token = json.loads(login_api).get("access_token") + #url = "https://api.hubapi.com/contacts/v1" + #login_api = requests.post(url).text + #access_token = json.loads(login_api).get("access_token") + access_token = self.config.get("access_token") return BearerTokenAuthenticator.create_for_stream(self, token=access_token, ) @@ -149,3 +151,4 @@ def post_process( """ # TODO: Delete this method if not needed. return row + \ No newline at end of file From e2cda141d0e614d238355d107b872afd9a37c3ee Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 23 May 2023 17:02:31 +0530 Subject: [PATCH 014/105] updated list stream and added users stream --- tap_hubspot_sdk/streams.py | 174 +++++++++++++++++++++++++++++++++++-- 1 file changed, 165 insertions(+), 9 deletions(-) diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index eb7e633..66e60ef 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -20,24 +20,157 @@ class ListsStream(HubspotStream): columns = """ - offset, total, lists, has-more + vid, canonical-vid, merged-vids, portal-id, is-contact, properties """ - name = "lists" - path = "/lists" - primary_keys = ["lists"] + name = "contact" + path = "/lists/all/contacts/all?fields={}".format(columns) + primary_keys = ["addedAt"] + replication_key = "addedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("vid", IntegerType), + Property("canonical-vid", IntegerType), + Property("merged-vids", ArrayType(StringType)), + Property("portal-id", IntegerType), + Property("is-contact", BooleanType), + Property("properties", + ObjectType(Property("lastmodifieddate", StringType), + Property("email", StringType), + Property("message", StringType), + Property("city", StringType), + Property("company", StringType), + Property("createddate", StringType), + Property("firstname", StringType), + Property("hs_all_contact_vids", StringType), + Property("hs_date_entered_lead", StringType), + Property("hs_marketable_reason_id", StringType), + Property("hs_is_unworked", StringType), + Property("hs_marketable_until_renewal", StringType), + Property("hs_latest_source_timestamp", StringType), + Property("hs_marketable_reason_type", StringType), + Property("hs_marketable_status", StringType), + Property("hs_is_contact", StringType), + Property("hs_email_domain", StringType), + Property("hs_pipeline", StringType), + Property("hs_sequences_actively_enrolled_count", StringType), + Property("hs_object_id", StringType), + Property("hs_time_in_lead", StringType), + Property("num_conversion_events", StringType), + Property("num_unique_conversion_events", StringType), + Property("lastname", StringType), + Property("hs_analytics_num_page_views", StringType), + Property("hs_analytics_num_event_completions", StringType), + Property("hs_analytics_first_timestamp", StringType), + Property("hs_social_twitter_clicks", StringType), + Property("hs_analytics_num_visits", StringType), + Property("twitterprofilephoto", StringType), + Property("twitterhandle", StringType), + Property("hs_analytics_source_data_2", StringType), + Property("hs_social_facebook_clicks", StringType), + Property("hs_analytics_source", StringType), + Property("hs_analytics_source_data_1", StringType), + Property("hs_latest_source", StringType), + Property("hs_latest_source_data_1", StringType), + Property("hs_latest_source_data_2", StringType), + Property("hs_social_google_plus_clicks", StringType), + 
Property("hs_social_num_broadcast_clicks", StringType), + Property("state", StringType), + Property("hs_social_linkedin_clicks", StringType), + Property("hs_lifecyclestage_lead_date", StringType), + Property("hs_analytics_revenue", StringType), + Property("hs_analytics_average_page_views", StringType), + Property("website", StringType), + Property("lifecyclestage", StringType), + Property("jobtitle", StringType), + ) + + ), + Property("form-submissions", ArrayType(StringType)), + Property("identity-profiles", ArrayType(StringType)), + Property("merge-audits", ArrayType(StringType)), + Property("addedAt", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_1", "") + base_url = "https://api.hubapi.com/contacts/{}".format(version) + return base_url + + def get_url_params( + self, + context: dict | None, # noqa: ARG002 + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + params["property"] = "message","email","city","company","createddate","firstname","hs_all_contact_vids","hs_date_entered_lead","hs_marketable_reason_id","hs_is_unworked","hs_marketable_until_renewal","hs_latest_source_timestamp","hs_marketable_reason_type","hs_marketable_status","hs_is_contact","hs_email_domain","hs_pipeline","hs_sequences_actively_enrolled_count","hs_object_id","hs_time_in_lead","num_conversion_events","num_unique_conversion_events","lastname","hs_analytics_num_page_views","hs_analytics_num_event_completions","hs_analytics_first_timestamp","hs_social_twitter_clicks","hs_analytics_num_visits","twitterprofilephoto","twitterhandle","hs_analytics_source_data_2","hs_social_facebook_clicks","hs_analytics_source","hs_analytics_source_data_1","hs_latest_source","hs_latest_source_data_1","hs_latest_source_data_2","hs_social_google_plus_clicks","hs_social_num_broadcast_clicks","state","hs_social_linkedin_clicks","hs_lifecyclestage_lead_date","hs_analytics_revenue","hs_analytics_average_page_views","website","lifecyclestage","jobtitle" + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("contacts") is not None: + results = resp_json["contacts"] + else: + results = resp_json + + yield from results + +class UsersStream(HubspotStream): + columns = """ + id, email, roleIds, primaryteamid + """ + + name = "users" + path = "/users?fields={}".format(columns) + primary_keys = ["id"] #replication_key = "LastModifiedDate" #replication_method = "incremental" schema = PropertiesList( - Property("offset", StringType), - Property("total", StringType), - Property("lists", StringType), - Property("has-more", StringType), - + Property("id", IntegerType), + Property("email", StringType), + Property("roleIds", ArrayType(StringType)), + Property("primaryteamid", StringType), ).to_dict() + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/settings/{}".format(version) + return base_url + def get_url_params( self, context: dict | None, # noqa: ARG002 @@ -61,4 +194,27 @@ def get_url_params( return params + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + + From a25d9832d67f0fd6a181136b1fc4e64042b36238 Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 23 May 2023 17:38:40 +0530 Subject: [PATCH 015/105] updated list stream --- tap_hubspot_sdk/streams.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index 66e60ef..39409c6 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -43,15 +43,15 @@ class ListsStream(HubspotStream): Property("company", StringType), Property("createddate", StringType), Property("firstname", StringType), - Property("hs_all_contact_vids", StringType), + Property("hs_all_contact_vids", IntegerType), Property("hs_date_entered_lead", StringType), Property("hs_marketable_reason_id", StringType), - Property("hs_is_unworked", StringType), - Property("hs_marketable_until_renewal", StringType), + Property("hs_is_unworked", BooleanType), + Property("hs_marketable_until_renewal", BooleanType), Property("hs_latest_source_timestamp", StringType), Property("hs_marketable_reason_type", StringType), - Property("hs_marketable_status", StringType), - Property("hs_is_contact", StringType), + Property("hs_marketable_status", BooleanType), + Property("hs_is_contact", BooleanType), Property("hs_email_domain", StringType), Property("hs_pipeline", StringType), Property("hs_sequences_actively_enrolled_count", StringType), @@ -214,7 +214,4 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results - - - - + \ No newline at end of file From 56a14df10badd3b18536c5c7b062a042ad08d85e Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Wed, 24 May 2023 16:47:23 +0530 Subject: [PATCH 016/105] added owner, email subscription, ticket, and deal streams --- tap_hubspot_sdk/streams.py | 304 ++++++++++++++++++++++++++++++++++++- tap_hubspot_sdk/tap.py | 6 +- 2 files changed, 306 insertions(+), 4 deletions(-) diff --git a/tap_hubspot_sdk/streams.py 
b/tap_hubspot_sdk/streams.py index 39409c6..16f28b2 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -102,7 +102,7 @@ def url_base(self) -> str: def get_url_params( self, - context: dict | None, # noqa: ARG002 + context: dict | None, next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -122,6 +122,7 @@ def get_url_params( params["order_by"] = self.replication_key params["property"] = "message","email","city","company","createddate","firstname","hs_all_contact_vids","hs_date_entered_lead","hs_marketable_reason_id","hs_is_unworked","hs_marketable_until_renewal","hs_latest_source_timestamp","hs_marketable_reason_type","hs_marketable_status","hs_is_contact","hs_email_domain","hs_pipeline","hs_sequences_actively_enrolled_count","hs_object_id","hs_time_in_lead","num_conversion_events","num_unique_conversion_events","lastname","hs_analytics_num_page_views","hs_analytics_num_event_completions","hs_analytics_first_timestamp","hs_social_twitter_clicks","hs_analytics_num_visits","twitterprofilephoto","twitterhandle","hs_analytics_source_data_2","hs_social_facebook_clicks","hs_analytics_source","hs_analytics_source_data_1","hs_latest_source","hs_latest_source_data_1","hs_latest_source_data_2","hs_social_google_plus_clicks","hs_social_num_broadcast_clicks","state","hs_social_linkedin_clicks","hs_lifecyclestage_lead_date","hs_analytics_revenue","hs_analytics_average_page_views","website","lifecyclestage","jobtitle" + params["propertyMode"] = "value_and_history" return params @@ -173,7 +174,7 @@ def url_base(self) -> str: def get_url_params( self, - context: dict | None, # noqa: ARG002 + context: dict | None, next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -214,4 +215,301 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results - \ No newline at end of file + +class OwnersStream(HubspotStream): + columns = """ + id, email, firstName, lastName, userId, createdAt, updatedAt, archived + """ + + name = "owners" + path = "/owners?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("id", IntegerType), + Property("email", StringType), + Property("firstName", StringType), + Property("lastName", StringType), + Property("userId", IntegerType), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. 
+ + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class TicketPipelineStream(HubspotStream): + columns = """ + label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default + """ + + name = "ticketpipeline" + path = "/pipelines/tickets?fields={}".format(columns) + primary_keys = ["createdAt"] + replication_key = "createdAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("label", StringType), + Property("displayOrder", StringType), + Property("active", BooleanType), + Property("stages", StringType), + Property("objectType", StringType), + Property("objectTypeId", StringType), + Property("pipelineId", StringType), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("default", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_1", "") + base_url = "https://api.hubapi.com/crm-pipelines/{}".format(version) + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class DealPipelineStream(HubspotStream): + columns = """ + label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default + """ + + name = "dealpipeline" + path = "/pipelines/deals?fields={}".format(columns) + primary_keys = ["createdAt"] + replication_key = "createdAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("label", StringType), + Property("displayOrder", StringType), + Property("active", BooleanType), + Property("stages", StringType), + Property("objectType", StringType), + Property("objectTypeId", StringType), + Property("pipelineId", StringType), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("default", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_1", "") + base_url = "https://api.hubapi.com/crm-pipelines/{}".format(version) + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. 
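The pipeline streams declare replication_key = "createdAt" with replication_method = "incremental", but the bookmark never reaches the request. When an endpoint supports a date filter, the usual Singer SDK pattern is to thread get_starting_timestamp into the params; a sketch of that pattern (the createdAfter parameter is hypothetical, purely for illustration, not a documented HubSpot param):

    def get_url_params(self, context, next_page_token):
        params: dict = {}
        start = self.get_starting_timestamp(context)  # bookmark, else start_date
        if start:
            # Hypothetical filter parameter, shown only to illustrate the idea.
            params["createdAfter"] = start.isoformat()
        if next_page_token:
            params["page"] = next_page_token
        return params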
+ """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class EmailSubscriptionStream(HubspotStream): + columns = """ + id, portalId, name, description, active, internal, category, channel, internalName, businessUnitId + """ + + name = "emailsubscription" + path = "/subscriptions/?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + schema = PropertiesList( + Property("id", IntegerType), + Property("portalId", IntegerType), + Property("name", StringType), + Property("description", StringType), + Property("active", BooleanType), + Property("internal", StringType), + Property("category", StringType), + Property("channel", StringType), + Property("internalName", StringType), + Property("businessUnitId", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_1", "") + base_url = "https://api.hubapi.com/email/public/{}".format(version) + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
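Each stream above hand-writes the same parse_response with a different wrapper key; the base class comment in client.py (records_jsonpath = "$[*]"  # Or override `parse_response`.) already points at the lighter alternative. For a payload that nests records under a key, a one-line override does the same job:

    class EmailSubscriptionStream(HubspotStream):
        # Equivalent to the hand-written parse_response above.
        records_jsonpath = "$.subscriptionDefinitions[*]"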
+        """
+
+        resp_json = response.json()
+
+        if isinstance(resp_json, list):
+            results = resp_json
+        elif resp_json.get("subscriptionDefinitions") is not None:
+            results = resp_json["subscriptionDefinitions"]
+        else:
+            results = resp_json
+
+        yield from results

From 9f2bc3fa5d4a5b79137e9feaee71205342fa3e7b Mon Sep 17 00:00:00 2001
From: Dhrruv
Date: Thu, 25 May 2023 17:14:34 +0530
Subject: [PATCH 017/105] added api version in meltano.yml

---
 meltano.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/meltano.yml b/meltano.yml
index 975a147..795bcb7 100644
--- a/meltano.yml
+++ b/meltano.yml
@@ -26,7 +26,9 @@ plugins:
     - name: api_version_2
       value: 'v2'
     - name: api_version_3
-      value: 'v3'
+      value: 'v3'
+    - name: api_version_4
+      value: 'v4'
   loaders:
   - name: target-jsonl
     variant: andyh1203

From 2ab84cdb89258f21b464ee71c0d0e2dc77074489 Mon Sep 17 00:00:00 2001
From: Dhrruv
Date: Thu, 25 May 2023 17:15:09 +0530
Subject: [PATCH 018/105] added property streams

---
 tap_hubspot_sdk/streams.py | 967 +++++++++++++++++++++++++++++++++++++
 tap_hubspot_sdk/tap.py     |   7 +-
 2 files changed, 968 insertions(+), 6 deletions(-)

diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py
index 16f28b2..0ed0c57 100644
--- a/tap_hubspot_sdk/streams.py
+++ b/tap_hubspot_sdk/streams.py
@@ -19,6 +19,7 @@

 class ListsStream(HubspotStream):
+
     columns = """
     vid, canonical-vid, merged-vids, portal-id, is-contact, properties
     """
@@ -148,6 +149,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]:
         yield from results

 class UsersStream(HubspotStream):
+
     columns = """
     id, email, roleIds, primaryteamid
     """
@@ -217,6 +219,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]:
         yield from results

 class OwnersStream(HubspotStream):
+
     columns = """
     id, email, firstName, lastName, userId, createdAt, updatedAt, archived
     """
@@ -290,6 +293,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]:
         yield from results

 class TicketPipelineStream(HubspotStream):
+
     columns = """
     label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default
     """
@@ -365,6 +369,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]:
         yield from results

 class DealPipelineStream(HubspotStream):
+
     columns = """
     label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default
     """
@@ -440,6 +445,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]:
         yield from results

 class EmailSubscriptionStream(HubspotStream):
+
     columns = """
     id, portalId, name, description, active, internal, category, channel, internalName, businessUnitId
     """
@@ -513,3 +519,964 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]:
             results = resp_json

         yield from results
+
+class PropertyTicketStream(HubspotStream):
+
+    columns = """
+    updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder,
+    calculated, externalOptions, hasUniqueValue, hidden,
hubspotDefined, modificationMetadata, formField + """ + + name = "propertyticket" + path = "/properties/tickets?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + + row["hubspot_object"] = "ticket" + + return super().post_process(row, context) + +class PropertyDealStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertydeal" + path = "/properties/deals?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + Property("calculationFormula", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "deal" + except: + pass + + return super().post_process(row, context) + +class PropertyContactStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertycontact" + path = "/properties/contacts?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "contact" + except: + pass + + return super().post_process(row, context) + +class PropertyCompanyStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertycompany" + path = "/properties/company?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "company" + except: + pass + + return super().post_process(row, context) + +class PropertyProductStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertyproduct" + path = "/properties/product?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "product" + except: + pass + + return super().post_process(row, context) + +class PropertyLineItemStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertylineitem" + path = "/properties/line_item?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "line_item" + except: + pass + + return super().post_process(row, context) + +class PropertyEmailStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertyemail" + path = "/properties/email?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "email" + except: + pass + + return super().post_process(row, context) + +class PropertyPostalMailStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertypostalmail" + path = "/properties/postal_mail?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "postal_mail" + except: + pass + + return super().post_process(row, context) + +class PropertyCallStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertycall" + path = "/properties/call?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "call" + except: + pass + + return super().post_process(row, context) + +class PropertyMeetingStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertymeeting" + path = "/properties/meeting?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "meeting" + except: + pass + + return super().post_process(row, context) + +class PropertyTaskStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertytask" + path = "/properties/task?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "task" + except: + pass + + return super().post_process(row, context) + +class PropertyCommunicationStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "propertycommunication" + path = "/properties/task?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "communication" + except: + pass + + return super().post_process(row, context) + +class PropertyNotesStream(HubspotStream): + + columns = """ + updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, + calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField + """ + + name = "property" + path = "/properties/notes?fields={}".format(columns) + replication_key = "updatedAt" + replication_method = "incremental" + + schema = PropertiesList( + Property("updatedAt", StringType), + Property("createdAt", StringType), + Property("name", StringType), + Property("label", StringType), + Property("type", StringType), + Property("fieldType", StringType), + Property("description", StringType), + Property("groupName", StringType), + Property("options", StringType), + Property("displayOrder", StringType), + Property("calculated", BooleanType), + Property("externalOptions", BooleanType), + Property("hasUniqueValue", BooleanType), + Property("hidden", BooleanType), + Property("hubspotDefined", BooleanType), + Property("modificationMetadata", StringType), + Property("formField", BooleanType), + Property("hubspot_object", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["hubspot_object"] = "note" + except: + pass + + return super().post_process(row, context) + + def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: + property_ticket = PropertyTicketStream( + self._tap, schema={"properties": {}} + ) + property_deal = PropertyDealStream( + self._tap, schema={"properties": {}} + ) + property_contact = PropertyContactStream( + self._tap, schema={"properties": {}} + ) + property_company = PropertyCompanyStream( + self._tap, schema={"properties": {}} + ) + property_product = PropertyProductStream( + self._tap, schema={"properties": {}} + ) + property_lineitem = PropertyLineItemStream( + self._tap, schema={"properties": {}} + ) + property_email = PropertyEmailStream( + self._tap, schema={"properties": {}} + ) + property_postalmail = PropertyPostalMailStream( + self._tap, schema={"properties": {}} + ) + property_call = PropertyCallStream( + self._tap, schema={"properties": {}} + ) + property_meeting = PropertyMeetingStream( + self._tap, schema={"properties": {}} + ) + property_task = PropertyTaskStream( + self._tap, schema={"properties": {}} + ) + property_communication = PropertyCommunicationStream( + self._tap, schema={"properties": {}} + ) + property_records = list(property_ticket.get_records(context)) + list(property_deal.get_records(context)) + list(property_contact.get_records(context)) + list(property_company.get_records(context)) + list(property_product.get_records(context)) + list(property_lineitem.get_records(context)) + list(property_email.get_records(context)) + list(property_postalmail.get_records(context)) + list(property_call.get_records(context)) + list(property_meeting.get_records(context)) + list(property_task.get_records(context)) + list(property_communication.get_records(context)) + list(super().get_records(context)) + + return property_records + \ No newline at end of file diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py index 1b0e9e4..9bb46cc 100644 --- a/tap_hubspot_sdk/tap.py +++ b/tap_hubspot_sdk/tap.py @@ -35,12 +35,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: A list of discovered streams. 
""" return [ - streams.ListsStream(self), - streams.UsersStream(self), - streams.OwnersStream(self), - streams.TicketPipelineStream(self), - streams.DealPipelineStream(self), - streams.EmailSubscriptionStream(self), + streams.PropertyNotesStream(self), ] From ff8daea0639648f5f460440733f8ef6c97cfb3cd Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Fri, 26 May 2023 17:55:40 +0530 Subject: [PATCH 019/105] added streams for association type stream --- tap_hubspot_sdk/streams.py | 3595 +++++++++++++++++++++++++++++++++++- tap_hubspot_sdk/tap.py | 7 + 2 files changed, 3601 insertions(+), 1 deletion(-) diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index 0ed0c57..13cbbba 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -19,6 +19,19 @@ class ListsStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods/lists/get_lists + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ vid, canonical-vid, merged-vids, portal-id, is-contact, properties @@ -149,6 +162,19 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results class UsersStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods/ + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ id, email, roleIds, primaryteamid @@ -219,6 +245,19 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results class OwnersStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods/owners/get_owners + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ id, email, firstName, lastName, userId, createdAt, updatedAt, archived @@ -293,6 +332,19 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results class TicketPipelineStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods/tickets/get-all-tickets + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default @@ -369,6 +421,19 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results class DealPipelineStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods/deals/get-all-deals + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ label, displayOrder, 
active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default @@ -445,6 +510,19 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results class EmailSubscriptionStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods/email/get_subscriptions + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ id, portalId, name, description, active, internal, category, channel, internalName, businessUnitId @@ -521,6 +599,19 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results class PropertyTicketStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -551,6 +642,7 @@ class PropertyTicketStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -611,6 +703,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyDealStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -642,6 +747,7 @@ class PropertyDealStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("calculationFormula", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -681,6 +787,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyContactStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -711,6 +830,7 @@ class PropertyContactStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -750,6 +870,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyCompanyStream(HubspotStream): + + """ + 
https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -780,6 +913,7 @@ class PropertyCompanyStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -819,6 +953,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyProductStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -849,6 +996,7 @@ class PropertyProductStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -888,6 +1036,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyLineItemStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -918,6 +1079,7 @@ class PropertyLineItemStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -957,6 +1119,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyEmailStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -987,6 +1162,7 @@ class PropertyEmailStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -1026,6 +1202,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyPostalMailStream(HubspotStream): + + """ + 
https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -1056,6 +1245,7 @@ class PropertyPostalMailStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -1095,6 +1285,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyCallStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -1125,6 +1328,7 @@ class PropertyCallStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -1164,6 +1368,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyMeetingStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -1194,6 +1411,7 @@ class PropertyMeetingStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -1233,6 +1451,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyTaskStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -1263,6 +1494,7 @@ class PropertyTaskStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -1302,6 +1534,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyCommunicationStream(HubspotStream): + + """ + 
https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -1332,6 +1577,7 @@ class PropertyCommunicationStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -1371,6 +1617,19 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) class PropertyNotesStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, @@ -1401,6 +1660,7 @@ class PropertyNotesStream(HubspotStream): Property("modificationMetadata", StringType), Property("formField", BooleanType), Property("hubspot_object", StringType), + Property("showCurrencySymbol", StringType), ).to_dict() @@ -1479,4 +1739,3337 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: property_records = list(property_ticket.get_records(context)) + list(property_deal.get_records(context)) + list(property_contact.get_records(context)) + list(property_company.get_records(context)) + list(property_product.get_records(context)) + list(property_lineitem.get_records(context)) + list(property_email.get_records(context)) + list(property_postalmail.get_records(context)) + list(property_call.get_records(context)) + list(property_meeting.get_records(context)) + list(property_task.get_records(context)) + list(property_communication.get_records(context)) + list(super().get_records(context)) return property_records - \ No newline at end of file + +class AssociationContactCompanyTypeStream(HubspotStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "contactcompanytype" + path = "/associations/contact/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + schema = PropertiesList( + Property("id", IntegerType), + Property("name", StringType), + Property("from_object_type", StringType), + Property("to_object_type", StringType), + Property("category", StringType), + Property("typeId", IntegerType), + Property("label", StringType), + + ).to_dict() + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the 
response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class AssociationContactCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "contactcompanylabel" + path = "/associations/contact/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "contact" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationDealContactTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "dealcontacttype" + path = "/associations/deal/contact/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "deal" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationDealContactLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "dealcontactlabel" + path = "/associations/deal/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "deal" + 
row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationDealCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "dealcompanytype" + path = "/associations/deal/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "deal" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationDealCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "dealcompanylabel" + path = "/associations/deal/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "deal" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketContactTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketcontacttype" + path = "/associations/ticket/contact/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketContactLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added 
to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketcontactlabel" + path = "/associations/ticket/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketcompanytype" + path = "/associations/ticket/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketcompanylabel" + path = "/associations/ticket/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = 
"ticketdealtype" + path = "/associations/ticket/deal/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketDealLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketdeallabel" + path = "/associations/ticket/deal/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketCommunicationTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketcommunicationtype" + path = "/associations/ticket/communication/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "communication" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketCommunicationLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketcommunicationlabel" + path = "/associations/ticket/communication/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = 
self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "communication" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketCallTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketcalltype" + path = "/associations/ticket/call/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "call" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketCallLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketcalllabel" + path = "/associations/ticket/call/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "call" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketMeetingTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketmeetingtype" + path = "/associations/ticket/meeting/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "meeting" + except: + pass + + return 
super().post_process(row, context) + +class AssociationTicketMeetingLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketmeetinglabel" + path = "/associations/ticket/meeting/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "meeting" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketNoteTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketnotetype" + path = "/associations/ticket/note/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "note" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketNoteLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketnotelabel" + path = "/associations/ticket/note/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "note" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketTaskTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + 
path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "tickettasktype" + path = "/associations/ticket/task/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "task" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketTaskLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "tickettasklabel" + path = "/associations/ticket/task/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "task" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketEmailTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketemailtype" + path = "/associations/ticket/email/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "email" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketEmailLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketemaillabel" + path = "/associations/ticket/email/labels?fields={}".format(columns) + 
primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "email" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketPostalMailTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "ticketpostalmailtype" + path = "/associations/ticket/postal_mail/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "postal_mail" + except: + pass + + return super().post_process(row, context) + +class AssociationTicketPostalMailLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "ticketpostalmaillabel" + path = "/associations/ticket/postal_mail/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "ticket" + row["to_object_type"] = "postal_mail" + except: + pass + + return super().post_process(row, context) + +class AssociationLineItemDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "lineitemdealtype" + path = "/associations/line_item/deal/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def 
post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "line_item" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationLineItemDealLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "lineitemdeallabel" + path = "/associations/line_item/deal/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "line_item" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationCommunicationContactTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "communicationcontacttype" + path = "/associations/communication/contact/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "communication" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationCommunicationContactLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "communicationcontactlabel" + path = "/associations/communication/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "communication" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class 
AssociationCommunicationCompanyTypeStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    id, name
+    """
+
+    name = "communicationcompanytype"
+    path = "/associations/communication/company/types?fields={}".format(columns)
+    primary_keys = ["id"]
+    replication_key = "id"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_3", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "communication"
+            row["to_object_type"] = "company"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationCommunicationCompanyLabelStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    category, typeId, label
+    """
+
+    name = "communicationcompanylabel"
+    path = "/associations/communication/company/labels?fields={}".format(columns)
+    primary_keys = ["typeId"]
+    replication_key = "typeId"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_4", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "communication"
+            row["to_object_type"] = "company"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationCommunicationDealTypeStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    id, name
+    """
+
+    name = "communicationdealtype"
+    path = "/associations/communication/deal/types?fields={}".format(columns)
+    primary_keys = ["id"]
+    replication_key = "id"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_3", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "communication"
+            row["to_object_type"] = "deal"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationCommunicationDealLabelStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields
parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "communicationdeallabel" + path = "/associations/communication/deal/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "communication" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationCallContactTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "callcontacttype" + path = "/associations/call/contact/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "call" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationCallContactLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "callcontactlabel" + path = "/associations/call/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "call" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationCallCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "callcompanytype" + 
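+
+    # `path` injects the class-level `columns` string into the `fields` query
+    # parameter; client.py appends this path to `url_base` when building the
+    # request URL.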
path = "/associations/call/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "call" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationCallCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "callcompanylabel" + path = "/associations/call/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "call" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationCallDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "calldealtype" + path = "/associations/call/deal/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "call" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationCallDealLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "calldeallabel" + path = "/associations/call/deal/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url 
+ + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "call" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationEmailContactTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "emailcontacttype" + path = "/associations/email/contact/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "email" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationEmailContactLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "emailcontactlabel" + path = "/associations/email/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "email" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationEmailCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "emailcompanytype" + path = "/associations/email/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "email" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationEmailCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + 
https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "emailcompanylabel" + path = "/associations/email/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "email" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationEmailDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "emaildealtype" + path = "/associations/email/deal/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "email" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationEmailDealLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "emaildeallabel" + path = "/associations/email/deal/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "email" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationMeetingContactTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + 
replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "meetingcontacttype" + path = "/associations/meeting/contact/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "meeting" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationMeetingContactLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "meetingcontactlabel" + path = "/associations/meeting/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "meeting" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationMeetingCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "meetingcompanytype" + path = "/associations/meeting/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "meeting" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationMeetingCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "meetingcompanylabel" + path = "/associations/meeting/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = 
"incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "meeting" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationMeetingDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "meetingdealtype" + path = "/associations/meeting/deal/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "meeting" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationMeetingDealLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "meetingdeallabel" + path = "/associations/meeting/deal/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "meeting" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationNoteContactTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "notecontacttype" + path = "/associations/note/contact/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "note" + 
row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationNoteContactLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "notecontactlabel" + path = "/associations/note/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "note" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationNoteCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "notecompanytype" + path = "/associations/note/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "note" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationNoteCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "notecompanylabel" + path = "/associations/note/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "note" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssoxationNoteDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will 
be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    id, name
+    """
+
+    name = "notedealtype"
+    path = "/associations/note/deal/types?fields={}".format(columns)
+    primary_keys = ["id"]
+    replication_key = "id"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_3", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "note"
+            row["to_object_type"] = "deal"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationNoteDealLabelStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    category, typeId, label
+    """
+
+    name = "notedeallabel"
+    path = "/associations/note/deal/labels?fields={}".format(columns)
+    primary_keys = ["typeId"]
+    replication_key = "typeId"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_4", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "note"
+            row["to_object_type"] = "deal"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationTaskContactTypeStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    id, name
+    """
+
+    name = "taskcontacttype"
+    path = "/associations/task/contact/types?fields={}".format(columns)
+    primary_keys = ["id"]
+    replication_key = "id"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_3", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "task"
+            row["to_object_type"] = "contact"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationTaskContactLabelStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    category, typeId, label
+    """
+
+    name = "taskcontactlabel"
"/associations/task/contact/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "task" + row["to_object_type"] = "contact" + except: + pass + + return super().post_process(row, context) + +class AssociationTaskCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "taskcompanytype" + path = "/associations/task/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "task" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationTaskCompanyLabelstream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "taskcompanystream" + path = "/associations/task/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "task" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationTaskDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "taskdealtype" + path = "/associations/task/deal/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def 
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "task"
+            row["to_object_type"] = "deal"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationTaskDealLabelStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    category, typeId, label
+    """
+
+    name = "taskdeallabel"
+    path = "/associations/task/deal/labels?fields={}".format(columns)
+    primary_keys = ["typeId"]
+    replication_key = "typeId"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_4", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "task"
+            row["to_object_type"] = "deal"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationPostalMailContactTypeStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    id, name
+    """
+
+    name = "postalmailcontacttype"
+    path = "/associations/postal_mail/contact/types?fields={}".format(columns)
+    primary_keys = ["id"]
+    replication_key = "id"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_3", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "postal_mail"
+            row["to_object_type"] = "contact"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class AssociationPostalMailContactLabelStream(AssociationContactCompanyTypeStream):
+
+    """
+    https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations
+    """
+
+    """
+    columns: columns which will be added to fields parameter in api
+    name: stream name
+    path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    category, typeId, label
+    """
+
+    name = "postalmailcontactlabel"
+    path = "/associations/postal_mail/contact/labels?fields={}".format(columns)
+    primary_keys = ["typeId"]
+    replication_key = "typeId"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_4", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "postal_mail"
+            row["to_object_type"] = "contact"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+class
AssociationPostalMailCompanyTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "postalmailcompanytype" + path = "/associations/postal_mail/company/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "postal_mail" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationPostalMailCompanyLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + category, typeId, label + """ + + name = "postalmailcompanylabel" + path = "/associations/postal_mail/company/labels?fields={}".format(columns) + primary_keys = ["typeId"] + replication_key = "typeId" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_4", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "postal_mail" + row["to_object_type"] = "company" + except: + pass + + return super().post_process(row, context) + +class AssociationPostalMailDealTypeStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, name + """ + + name = "postalmaildealtype" + path = "/associations/postal_mail/deal/types?fields={}".format(columns) + primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" + + @property + def url_base(self) -> str: + version = self.config.get("api_version_3", "") + base_url = "https://api.hubapi.com/crm/{}".format(version) + return base_url + + def post_process(self, row: dict, context: dict | None = None) -> dict | None: + try: + row["from_object_type"] = "postal_mail" + row["to_object_type"] = "deal" + except: + pass + + return super().post_process(row, context) + +class AssociationPostalMailDealLabelStream(AssociationContactCompanyTypeStream): + + """ + https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + 
path: path which will be added to api url in client.py
+    schema: instream schema
+    primary_keys = primary keys for the table
+    replication_key = datetime keys for replication
+    """
+
+    columns = """
+    category, typeId, label
+    """
+
+    name = "association_type"
+    path = "/associations/postal_mail/deal/labels?fields={}".format(columns)
+    primary_keys = ["typeId"]
+    replication_key = "typeId"
+    replication_method = "incremental"
+
+    @property
+    def url_base(self) -> str:
+        version = self.config.get("api_version_4", "")
+        base_url = "https://api.hubapi.com/crm/{}".format(version)
+        return base_url
+
+    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
+        try:
+            row["from_object_type"] = "postal_mail"
+            row["to_object_type"] = "deal"
+        except:
+            pass
+
+        return super().post_process(row, context)
+
+    def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
+
+        """
+        Every association pair is served by two endpoints: a "types" API that
+        returns the id and name columns, and a "labels" API that returns the
+        category, typeId, and label columns. This method instantiates both
+        streams for each pair, merges every type record with its positional
+        label record via merge_dicts, and returns the concatenated records as
+        the output of this stream.
+        """
+
+        contact_company_type = AssociationContactCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        contact_company_label = AssociationContactCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        deal_contact_type = AssociationDealContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        deal_contact_label = AssociationDealContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        deal_company_type = AssociationDealCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        deal_company_label = AssociationDealCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_contact_type = AssociationTicketContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_contact_label = AssociationTicketContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_company_type = AssociationTicketCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_company_label = AssociationTicketCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_deal_type = AssociationTicketDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_deal_label = AssociationTicketDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_communication_type = AssociationTicketCommunicationTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_communication_label = AssociationTicketCommunicationLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_call_type = AssociationTicketCallTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_call_label = AssociationTicketCallLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_meeting_type = AssociationTicketMeetingTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_meeting_label = AssociationTicketMeetingLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_note_type = AssociationTicketNoteTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_note_label = AssociationTicketNoteLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_task_type = AssociationTicketTaskTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_task_label = AssociationTicketTaskLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_email_type = AssociationTicketEmailTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_email_label = AssociationTicketEmailLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_postal_type = AssociationTicketPostalMailTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        ticket_postal_label = AssociationTicketPostalMailLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        line_deal_type = AssociationLineItemDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        line_deal_label = AssociationLineItemDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        communication_contact_type = AssociationCommunicationContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        communication_contact_label = AssociationCommunicationContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        communication_company_type = AssociationCommunicationCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        communication_company_label = AssociationCommunicationCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        communication_deal_type = AssociationCommunicationDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        communication_deal_label = AssociationCommunicationDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        call_contact_type = AssociationCallContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        call_contact_label = AssociationCallContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        call_company_type = AssociationCallCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        call_company_label = AssociationCallCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        call_deal_type = AssociationCallDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        call_deal_label = AssociationCallDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        email_contact_type = AssociationEmailContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        email_contact_label = AssociationEmailContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        email_company_type = AssociationEmailCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        email_company_label = AssociationEmailCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        email_deal_type = AssociationEmailDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        email_deal_label = AssociationEmailDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        meeting_contact_type = AssociationMeetingContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        meeting_contact_label = AssociationMeetingContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        meeting_company_type = AssociationMeetingCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        meeting_company_label = AssociationMeetingCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        meeting_deal_type = AssociationMeetingDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        meeting_deal_label = AssociationMeetingDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        note_contact_type = AssociationNoteContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        note_contact_label = AssociationNoteContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        note_company_type = AssociationNoteCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        note_company_label = AssociationNoteCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        note_deal_type = AssociationNoteDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        note_deal_label = AssociationNoteDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        task_contact_type = AssociationTaskContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        task_contact_label = AssociationTaskContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        task_company_type = AssociationTaskCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        task_company_label = AssociationTaskCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        task_deal_type = AssociationTaskDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        task_deal_label = AssociationTaskDealLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        postal_contact_type = AssociationPostalMailContactTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        postal_contact_label = AssociationPostalMailContactLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        postal_company_type = AssociationPostalMailCompanyTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+        postal_company_label = AssociationPostalMailCompanyLabelStream(
+            self._tap, schema={"properties": {}}
+        )
+        postal_deal_type = AssociationPostalMailDealTypeStream(
+            self._tap, schema={"properties": {}}
+        )
+
+        # zip() pairs each "types" record with its positional counterpart from
+        # the "labels" stream; merge_dicts then combines the two column sets.
+        contact_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(contact_company_type.get_records(context)),
+                list(contact_company_label.get_records(context))
+            )
+        ]
+
+        deal_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(deal_contact_type.get_records(context)),
+                list(deal_contact_label.get_records(context))
+            )
+        ]
+
+        deal_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(deal_company_type.get_records(context)),
+                list(deal_company_label.get_records(context))
+            )
+        ]
+
+        ticket_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_contact_type.get_records(context)),
+                list(ticket_contact_label.get_records(context))
+            )
+        ]
+
+        ticket_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_company_type.get_records(context)),
+                list(ticket_company_label.get_records(context))
+            )
+        ]
+
+        ticket_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_deal_type.get_records(context)),
+                list(ticket_deal_label.get_records(context))
+            )
+        ]
+
+        ticket_communication_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_communication_type.get_records(context)),
+                list(ticket_communication_label.get_records(context))
+            )
+        ]
+
+        ticket_call_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_call_type.get_records(context)),
+                list(ticket_call_label.get_records(context))
+            )
+        ]
+
+        ticket_meeting_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_meeting_type.get_records(context)),
+                list(ticket_meeting_label.get_records(context))
+            )
+        ]
+
+        ticket_note_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_note_type.get_records(context)),
+                list(ticket_note_label.get_records(context))
+            )
+        ]
+
+        ticket_task_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_task_type.get_records(context)),
+                list(ticket_task_label.get_records(context))
+            )
+        ]
+
+        ticket_email_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_email_type.get_records(context)),
+                list(ticket_email_label.get_records(context))
+            )
+        ]
+
+        ticket_postal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(ticket_postal_type.get_records(context)),
+                list(ticket_postal_label.get_records(context))
+            )
+        ]
+
+        line_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(line_deal_type.get_records(context)),
+                list(line_deal_label.get_records(context))
+            )
+        ]
+
+        communication_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(communication_contact_type.get_records(context)),
+                list(communication_contact_label.get_records(context))
+            )
+        ]
+
+        communication_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(communication_company_type.get_records(context)),
+                list(communication_company_label.get_records(context))
+            )
+        ]
+
+        communication_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(communication_deal_type.get_records(context)),
+                list(communication_deal_label.get_records(context))
+            )
+        ]
+
+        call_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(call_contact_type.get_records(context)),
+                list(call_contact_label.get_records(context))
+            )
+        ]
+
+        call_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(call_company_type.get_records(context)),
+                list(call_company_label.get_records(context))
+            )
+        ]
+
+        call_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(call_deal_type.get_records(context)),
+                list(call_deal_label.get_records(context))
+            )
+        ]
+
+        email_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(email_contact_type.get_records(context)),
+                list(email_contact_label.get_records(context))
+            )
+        ]
+
+        email_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(email_company_type.get_records(context)),
+                list(email_company_label.get_records(context))
+            )
+        ]
+
+        email_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(email_deal_type.get_records(context)),
+                list(email_deal_label.get_records(context))
+            )
+        ]
+
+        meeting_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(meeting_contact_type.get_records(context)),
+                list(meeting_contact_label.get_records(context))
+            )
+        ]
+
+        meeting_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(meeting_company_type.get_records(context)),
+                list(meeting_company_label.get_records(context))
+            )
+        ]
+
+        meeting_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(meeting_deal_type.get_records(context)),
+                list(meeting_deal_label.get_records(context))
+            )
+        ]
+
+        note_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(note_contact_type.get_records(context)),
+                list(note_contact_label.get_records(context))
+            )
+        ]
+
+        note_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(note_company_type.get_records(context)),
+                list(note_company_label.get_records(context))
+            )
+        ]
+
+        note_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(note_deal_type.get_records(context)),
+                list(note_deal_label.get_records(context))
+            )
+        ]
+
+        task_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(task_contact_type.get_records(context)),
+                list(task_contact_label.get_records(context))
+            )
+        ]
+
+        task_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(task_company_type.get_records(context)),
+                list(task_company_label.get_records(context))
+            )
+        ]
+
+        task_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(task_deal_type.get_records(context)),
+                list(task_deal_label.get_records(context))
+            )
+        ]
+
+        postal_contact_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(postal_contact_type.get_records(context)),
+                list(postal_contact_label.get_records(context))
+            )
+        ]
+
+        postal_company_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(postal_company_type.get_records(context)),
+                list(postal_company_label.get_records(context))
+            )
+        ]
+
+        postal_deal_records = [
+            self.merge_dicts(x, y)
+            for x, y in zip(
+                list(postal_deal_type.get_records(context)),
+                list(super().get_records(context))
+            )
+        ]
+
+        association_records = (
+            contact_company_records + deal_contact_records + deal_company_records
+            + ticket_contact_records + ticket_company_records + ticket_deal_records
+            + ticket_communication_records + ticket_call_records + ticket_meeting_records
+            + ticket_note_records + ticket_task_records + ticket_email_records
+            + ticket_postal_records + line_deal_records + communication_contact_records
+            + communication_company_records + communication_deal_records
+            + call_contact_records + call_company_records + call_deal_records
+            + email_contact_records + email_company_records + email_deal_records
+            + meeting_contact_records + meeting_company_records + meeting_deal_records
+            + note_contact_records + note_company_records + note_deal_records
+            + task_contact_records + task_company_records + task_deal_records
+            + postal_contact_records + postal_company_records + postal_deal_records
+        )
+
+        return association_records
+
+    def merge_dicts(self, *dict_args):
+        """
+        Given any number of dictionaries, shallow copy and merge into a new dict,
+        precedence goes to key-value pairs in latter dictionaries.
+        """
+        result = {}
+        for dictionary in dict_args:
+            result.update(dictionary)
+        return result
+
+
\ No newline at end of file
diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py
index 9bb46cc..9b18cee 100644
--- a/tap_hubspot_sdk/tap.py
+++ b/tap_hubspot_sdk/tap.py
@@ -35,7 +35,14 @@ def discover_streams(self) -> list[streams.HubspotStream]:
         A list of discovered streams.
         """
         return [
+            streams.ListsStream(self),
+            streams.UsersStream(self),
+            streams.OwnersStream(self),
+            streams.TicketPipelineStream(self),
+            streams.DealPipelineStream(self),
+            streams.EmailSubscriptionStream(self),
             streams.PropertyNotesStream(self),
+            streams.AssociationPostalMailDealLabelStream(self),
         ]

From cc6f20d47c11e9841e575509c937c9dd262af5a4 Mon Sep 17 00:00:00 2001
From: Dhrruv
Date: Mon, 12 Jun 2023 16:31:06 +0530
Subject: [PATCH 020/105] added auth variable

---
 meltano.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/meltano.yml b/meltano.yml
index 795bcb7..ea7d213 100644
--- a/meltano.yml
+++ b/meltano.yml
@@ -28,7 +28,9 @@ plugins:
       - name: api_version_3
         value: 'v3'
       - name: api_version_4
-        value: 'v4'
+        value: 'v4'
+      - name: auth_type
+        value: 'oauth'
   loaders:
   - name: target-jsonl
     variant: andyh1203

From eedd76cb9d13227d7e3b035f1a54b78c986843bf Mon Sep 17 00:00:00 2001
From: Dhrruv
Date: Mon, 12 Jun 2023 16:31:59 +0530
Subject: [PATCH 021/105] updated readme

---
 README.md | 78 +++++++++++++++++++++++++++++++------------------------
 1 file changed, 44 insertions(+), 34 deletions(-)

diff --git a/README.md b/README.md
index 01dcdf1..250faea 100644
--- a/README.md
+++ b/README.md
@@ -4,45 +4,56 @@
 Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps.
- +* `catalog` +* `state` +* `discover` +* `about` +* `stream-maps` +* `schema-flattening` ## Configuration ### Accepted Config Options - -A full list of supported settings and capabilities for this -tap is available by running: +## Installation ```bash -tap-hubspot-sdk --about +pipx install git+https://github.com/ryan-miranda-partners/tap-hubspot-sdk.git ``` ### Configure using environment variables @@ -53,14 +64,16 @@ environment variable is set either in the terminal context or in the `.env` file ### Source Authentication and Authorization - +A Hubspot access token is required to make API requests. (See [Hubspot API](https://developers.hubspot.com/docs/api/working-with-oauth) docs for more info) ## Usage You can easily run `tap-hubspot-sdk` by itself or in a pipeline using [Meltano](https://meltano.com/). +## Stream Inheritance + +This project uses parent-child streams. Learn more about them [here](https://gitlab.com/meltano/sdk/-/blob/main/docs/parent_streams.md). + ### Executing the Tap Directly ```bash @@ -100,11 +113,8 @@ poetry run tap-hubspot-sdk --help _**Note:** This tap will work in any Singer environment and does not require Meltano. Examples here are for convenience and to streamline end-to-end orchestration scenarios._ - Next, install Meltano (if you haven't already) and any needed plugins: From dd20eb9719981c81370daa82eb1055b982a1755e Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Mon, 12 Jun 2023 16:33:04 +0530 Subject: [PATCH 022/105] added simple and api authenticators --- tap_hubspot_sdk/client.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py index f1138f1..f61d3ab 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot_sdk/client.py @@ -20,7 +20,8 @@ else: from cached_property import cached_property -from singer_sdk.authenticators import BearerTokenAuthenticator +from singer_sdk.authenticators import BearerTokenAuthenticator, SimpleAuthenticator, BasicAuthenticator, APIAuthenticatorBase +from requests.auth import HTTPBasicAuth _Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] #SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") @@ -47,13 +48,20 @@ def authenticator(self) -> _Auth: An authenticator instance. 
""" - #url = "https://api.hubapi.com/contacts/v1" - #login_api = requests.post(url).text - #access_token = json.loads(login_api).get("access_token") access_token = self.config.get("access_token") - - return BearerTokenAuthenticator.create_for_stream(self, - token=access_token, ) + auth_type = self.config.get("auth_type") + + if auth_type == "oauth": + return BearerTokenAuthenticator.create_for_stream(self, + token=access_token, ) + + elif auth_type == "simple": + return SimpleAuthenticator(self, + auth_headers={"Authorization": "Bearer {}".format(access_token),},) + + elif auth_type == "api": + APIAuthenticatorBase.auth_headers = {"Authorization": "Bearer {}".format(access_token),} + return APIAuthenticatorBase(self,) @property def http_headers(self) -> dict: From 5d09678faaa22ce82d1b184c7e253607ed74da16 Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 13 Jun 2023 15:27:46 +0530 Subject: [PATCH 023/105] updated readme and added license --- LICENSE | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 12 ++++--- 2 files changed, 101 insertions(+), 4 deletions(-) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..809108b --- /dev/null +++ b/LICENSE @@ -0,0 +1,93 @@ +Elastic License 2.0 + +URL: https://www.elastic.co/licensing/elastic-license + +## Acceptance + +By using the software, you agree to all of the terms and conditions below. + +## Copyright License + +The licensor grants you a non-exclusive, royalty-free, worldwide, +non-sublicensable, non-transferable license to use, copy, distribute, make +available, and prepare derivative works of the software, in each case subject to +the limitations and conditions below. + +## Limitations + +You may not provide the software to third parties as a hosted or managed +service, where the service provides users with access to any substantial set of +the features or functionality of the software. + +You may not move, change, disable, or circumvent the license key functionality +in the software, and you may not remove or obscure any functionality in the +software that is protected by the license key. + +You may not alter, remove, or obscure any licensing, copyright, or other notices +of the licensor in the software. Any use of the licensor’s trademarks is subject +to applicable law. + +## Patents + +The licensor grants you a license, under any patent claims the licensor can +license, or becomes able to license, to make, have made, use, sell, offer for +sale, import and have imported the software, in each case subject to the +limitations and conditions in this license. This license does not cover any +patent claims that you cause to be infringed by modifications or additions to +the software. If you or your company make any written claim that the software +infringes or contributes to infringement of any patent, your patent license for +the software granted under these terms ends immediately. If your company makes +such a claim, your patent license ends immediately for work on behalf of your +company. + +## Notices + +You must ensure that anyone who gets a copy of any part of the software from you +also gets a copy of these terms. + +If you modify the software, you must include in any modified copies of the +software prominent notices stating that you have modified the software. + +## No Other Rights + +These terms do not imply any licenses other than those expressly granted in +these terms. 
+ +## Termination + +If you use the software in violation of these terms, such use is not licensed, +and your licenses will automatically terminate. If the licensor provides you +with a notice of your violation, and you cease all violation of this license no +later than 30 days after you receive that notice, your licenses will be +reinstated retroactively. However, if you violate these terms after such +reinstatement, any additional violation of these terms will cause your licenses +to terminate automatically and permanently. + +## No Liability + +*As far as the law allows, the software comes as is, without any warranty or +condition, and the licensor will not be liable to you for any damages arising +out of these terms or the use or nature of the software, under any kind of +legal claim.* + +## Definitions + +The **licensor** is the entity offering these terms, and the **software** is the +software the licensor makes available under these terms, including any portion +of it. + +**you** refers to the individual or entity agreeing to these terms. + +**your company** is any legal entity, sole proprietorship, or other kind of +organization that you work for, plus all organizations that have control over, +are under the control of, or are under common control with that +organization. **control** means ownership of substantially all the assets of an +entity, or the power to direct its management and policies by vote, contract, or +otherwise. Control can be direct or indirect. + +**your licenses** are all the licenses granted to you for the software under +these terms. + +**use** means anything you do with the software requiring one of your licenses. + +**trademark** means trademarks, service marks, and similar rights. diff --git a/README.md b/README.md index 250faea..0cc1c41 100644 --- a/README.md +++ b/README.md @@ -20,10 +20,10 @@ Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps. | Setting | Required | Default | Description | |:--------------------|:--------:|:-------:|:------------| | access_token | True | None | The token to authenticate against the API service | -| api_version_1 | False | v1.0 | The API version to request data from. | -| api_version_2 | False | v2.0 | The API version to request data from. | -| api_version_3 | False | v3.0 | The API version to request data from. | -| api_version_4 | False | v4.0 | The API version to request data from. | +| api_version_1 | True | v1.0 | The API version to request data from. | +| api_version_2 | True | v2.0 | The API version to request data from. | +| api_version_3 | True | v3.0 | The API version to request data from. | +| api_version_4 | True | v4.0 | The API version to request data from. | | start_date | False | None | The earliest record date to sync | | end_date | False | None | The latest record date to sync | | stream_maps | False | None | Config object for stream maps capability. For more information check out [Stream Maps](https://sdk.meltano.com/en/latest/stream_maps.html). | @@ -50,6 +50,10 @@ A full list of supported settings and capabilities for this tap is available by tap-hubspot-sdk --about ``` +## Elastic License 2.0 + +The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software. 
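The `auth_type` value added to `meltano.yml` in patch 020 selects which branch of the `authenticator` property from patch 022 runs. All three branches ultimately send the same `Authorization: Bearer <token>` header; the `api` branch, however, assigns `auth_headers` on the `APIAuthenticatorBase` *class* rather than on an instance, which would leak the header into every authenticator derived from that base, and an unrecognized `auth_type` falls through and returns `None`. (The new import line also pulls in `BasicAuthenticator` and `HTTPBasicAuth`, which nothing in the patch uses, and the README table above now marks `api_version_3`/`api_version_4` as required with defaults `v3.0`/`v4.0` even though `meltano.yml` pins them to `'v3'`/`'v4'`.) A hedged, instance-safe sketch of the same dispatch, using only names the patch already imports; `build_authenticator` is a hypothetical helper, and the config keys match patch 020:

```python
from singer_sdk.authenticators import BearerTokenAuthenticator, SimpleAuthenticator

def build_authenticator(stream):
    """Pick an authenticator from stream config (a sketch, not the patch)."""
    access_token = stream.config.get("access_token")
    auth_type = stream.config.get("auth_type", "oauth")  # meltano.yml default

    if auth_type == "oauth":
        return BearerTokenAuthenticator.create_for_stream(stream, token=access_token)

    # "simple", "api", and anything unrecognized all reduce to a static bearer
    # header, so an instance-level SimpleAuthenticator covers them without
    # mutating class state.
    return SimpleAuthenticator(
        stream,
        auth_headers={"Authorization": f"Bearer {access_token}"},
    )
```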
+ ## Installation ```bash From 8478e53861d6bd9cd444178557a1701b85824c7a Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 13 Jun 2023 16:31:59 +0530 Subject: [PATCH 024/105] removed archive --- archive/.github/pull_request_template.md | 11 - archive/CHANGELOG.md | 104 - archive/LICENSE | 620 ------ archive/MANIFEST.in | 2 - archive/README.md | 44 - archive/bin/run-a-test.sh | 5 - archive/bin/run-all-tests.sh | 1 - archive/config.sample.json | 9 - archive/setup.cfg | 2 - archive/setup.py | 46 - archive/tap_hubspot/__init__.py | 1306 ------------- archive/tap_hubspot/schemas/campaigns.json | 91 - archive/tap_hubspot/schemas/companies.json | 11 - .../tap_hubspot/schemas/contact_lists.json | 97 - archive/tap_hubspot/schemas/contacts.json | 201 -- .../schemas/contacts_by_company.json | 12 - .../tap_hubspot/schemas/deal_pipelines.json | 46 - archive/tap_hubspot/schemas/deals.json | 37 - archive/tap_hubspot/schemas/email_events.json | 118 -- archive/tap_hubspot/schemas/engagements.json | 179 -- archive/tap_hubspot/schemas/forms.json | 229 --- archive/tap_hubspot/schemas/owners.json | 72 - .../schemas/subscription_changes.json | 54 - archive/tap_hubspot/schemas/tickets.json | 138 -- archive/tap_hubspot/schemas/versions.json | 30 - archive/tap_hubspot/schemas/workflows.json | 48 - archive/tap_hubspot/tests/__init__.py | 0 archive/tap_hubspot/tests/test_bookmarks.py | 62 - archive/tap_hubspot/tests/test_deals.py | 34 - .../tests/test_get_streams_to_sync.py | 44 - archive/tap_hubspot/tests/test_offsets.py | 57 - .../tests/unittests/test_get_start.py | 94 - .../tests/unittests/test_request_timeout.py | 121 -- .../tests/unittests/test_tickets.py | 147 -- archive/tap_hubspot/tests/utils.py | 80 - archive/tests/base.py | 390 ---- archive/tests/client.py | 1679 ----------------- archive/tests/client_tester.py | 280 --- archive/tests/test_hubspot_all_fields.py | 327 ---- .../tests/test_hubspot_automatic_fields.py | 109 -- archive/tests/test_hubspot_bookmarks.py | 248 --- .../tests/test_hubspot_bookmarks_static.py | 127 -- .../tests/test_hubspot_child_stream_only.py | 88 - archive/tests/test_hubspot_discovery.py | 131 -- .../tests/test_hubspot_interrupted_sync.py | 142 -- .../test_hubspot_interrupted_sync_offset.py | 141 -- archive/tests/test_hubspot_pagination.py | 140 -- archive/tests/test_hubspot_start_date.py | 179 -- archive/tests/unittests/test_deals.py | 101 - 49 files changed, 8234 deletions(-) delete mode 100644 archive/.github/pull_request_template.md delete mode 100644 archive/CHANGELOG.md delete mode 100644 archive/LICENSE delete mode 100644 archive/MANIFEST.in delete mode 100644 archive/README.md delete mode 100755 archive/bin/run-a-test.sh delete mode 100755 archive/bin/run-all-tests.sh delete mode 100644 archive/config.sample.json delete mode 100644 archive/setup.cfg delete mode 100644 archive/setup.py delete mode 100644 archive/tap_hubspot/__init__.py delete mode 100644 archive/tap_hubspot/schemas/campaigns.json delete mode 100644 archive/tap_hubspot/schemas/companies.json delete mode 100644 archive/tap_hubspot/schemas/contact_lists.json delete mode 100644 archive/tap_hubspot/schemas/contacts.json delete mode 100644 archive/tap_hubspot/schemas/contacts_by_company.json delete mode 100644 archive/tap_hubspot/schemas/deal_pipelines.json delete mode 100644 archive/tap_hubspot/schemas/deals.json delete mode 100644 archive/tap_hubspot/schemas/email_events.json delete mode 100644 archive/tap_hubspot/schemas/engagements.json delete mode 100644 archive/tap_hubspot/schemas/forms.json delete mode 100644 
archive/tap_hubspot/schemas/owners.json delete mode 100644 archive/tap_hubspot/schemas/subscription_changes.json delete mode 100644 archive/tap_hubspot/schemas/tickets.json delete mode 100644 archive/tap_hubspot/schemas/versions.json delete mode 100644 archive/tap_hubspot/schemas/workflows.json delete mode 100644 archive/tap_hubspot/tests/__init__.py delete mode 100644 archive/tap_hubspot/tests/test_bookmarks.py delete mode 100644 archive/tap_hubspot/tests/test_deals.py delete mode 100644 archive/tap_hubspot/tests/test_get_streams_to_sync.py delete mode 100644 archive/tap_hubspot/tests/test_offsets.py delete mode 100644 archive/tap_hubspot/tests/unittests/test_get_start.py delete mode 100644 archive/tap_hubspot/tests/unittests/test_request_timeout.py delete mode 100644 archive/tap_hubspot/tests/unittests/test_tickets.py delete mode 100644 archive/tap_hubspot/tests/utils.py delete mode 100644 archive/tests/base.py delete mode 100644 archive/tests/client.py delete mode 100644 archive/tests/client_tester.py delete mode 100644 archive/tests/test_hubspot_all_fields.py delete mode 100644 archive/tests/test_hubspot_automatic_fields.py delete mode 100644 archive/tests/test_hubspot_bookmarks.py delete mode 100644 archive/tests/test_hubspot_bookmarks_static.py delete mode 100644 archive/tests/test_hubspot_child_stream_only.py delete mode 100644 archive/tests/test_hubspot_discovery.py delete mode 100644 archive/tests/test_hubspot_interrupted_sync.py delete mode 100644 archive/tests/test_hubspot_interrupted_sync_offset.py delete mode 100644 archive/tests/test_hubspot_pagination.py delete mode 100644 archive/tests/test_hubspot_start_date.py delete mode 100644 archive/tests/unittests/test_deals.py diff --git a/archive/.github/pull_request_template.md b/archive/.github/pull_request_template.md deleted file mode 100644 index 6e46b00..0000000 --- a/archive/.github/pull_request_template.md +++ /dev/null @@ -1,11 +0,0 @@ -# Description of change -(write a short description or paste a link to JIRA) - -# Manual QA steps - - - -# Risks - - - -# Rollback steps - - revert this branch diff --git a/archive/CHANGELOG.md b/archive/CHANGELOG.md deleted file mode 100644 index 4077450..0000000 --- a/archive/CHANGELOG.md +++ /dev/null @@ -1,104 +0,0 @@ -# Changelog - -## 2.12.1 - * Use sync start time for writing bookmarks [#226](https://github.com/singer-io/tap-hubspot/pull/226) - -## 2.12.0 - * Include properties(default + custom) in tickets stream [#220](https://github.com/singer-io/tap-hubspot/pull/220) - -## 2.11.0 - * Implement new stream - `tickets` [#218](https://github.com/singer-io/tap-hubspot/pull/218) - * Update integration tests for the tickets stream implementation [#219](https://github.com/singer-io/tap-hubspot/pull/219) - -## 2.10.0 - * Updated replication method as INCREMENTAL and replication key as property_hs_lastmodifieddate for deals and companies streams [#195](https://github.com/singer-io/tap-hubspot/pull/195) - * Fixed Pylint errors [#204](https://github.com/singer-io/tap-hubspot/pull/204) - -## 2.9.6 - * Implement Request Timeout [#177](https://github.com/singer-io/tap-hubspot/pull/177) - * Add version timestamp in contacts [#191](https://github.com/singer-io/tap-hubspot/pull/191 - -## 2.9.5 - * Fixes a bug in sending the fields to the v3 Deals endpoint [#145](https://github.com/singer-io/tap-hubspot/pull/145) - -## 2.9.4 - * Reverts 142 [#144](https://github.com/singer-io/tap-hubspot/pull/144) - -## 2.9.3 - * Add support for property_versions 
[#142](https://github.com/singer-io/tap-hubspot/pull/142) - -## 2.9.2 - * Change `POST` to V3 Deals to use one non-standard field instead of all fields we want [#139](https://github.com/singer-io/tap-hubspot/pull/139) - * See the pull request for a more detailed explaination - -## 2.9.1 - * Add retry logic to V3 calls [#136](https://github.com/singer-io/tap-hubspot/pull/136) - -## 2.9.0 - * Add fields to Deals stream - `hs_date_entered_*` and `hs_date_exited_*` [#133](https://github.com/singer-io/tap-hubspot/pull/133) - -## 2.8.1 - * Reverts `v2.8.0` back to `v.2.7.0` - -## 2.8.0 - * Add fields to Deals stream - `hs_date_entered_*` and `hs_date_exited_*` [#124](https://github.com/singer-io/tap-hubspot/pull/124) - -## 2.7.0 - * Fields nested under `properties` are copied to top level and prepended with `property_` [#107](https://github.com/singer-io/tap-hubspot/pull/107) - -## 2.6.5 - * For `deals` stream, use `includeAllProperties` flag instead of appending all properties to request url [#112](https://github.com/singer-io/tap-hubspot/pull/112) - -## 2.6.4 - * When making `deals` requests, only attach `properties` if selected [#102](https://github.com/singer-io/tap-hubspot/pull/102) - -## 2.6.3 - * Use the metadata library better - -## 2.6.2 - * Revert the revert. Go back to v2.6.0. - -## 2.6.1 - * Revert v2.6.0 to v.2.5.2 - -## 2.6.0 - * Replaced `annotated_schema` with Singer `metadata` - * Added integration tests to CircleCI - -## 2.5.2 - * Companies and Engagements have a new pattern to catch records that are updated during a long-running sync. Rather than using a lookback window, the bookmark value will be limited to the `min(current_sync_start, max_bk_seen)` [#98](https://github.com/singer-io/tap-hubspot/pull/98) - -## 2.4.0 - * The owners stream can optionally fetch "inactive owners" [#92](https://github.com/singer-io/tap-hubspot/pull/92) - -## 2.3.0 - * Engagements will now track how long the stream takes to sync, and look back on the next run by that amount to cover potentially missed updates due to asynchronous updates during the previous sync [#91](https://github.com/singer-io/tap-hubspot/pull/91) - -## 2.2.8 - * When resuming an interrupted sync, will now attempt all streams before exiting [#90](https://github.com/singer-io/tap-hubspot/pull/90) - -## 2.2.7 - * Add `delivered`, `forward`, `print`, `reply`, `spamreport` to `campaigns.counters` - -## 2.2.6 - * Change a loop over `dict.items()` to `dict.values()` because the keys returned were not being used [#82](https://github.com/singer-io/tap-hubspot/pull/82) - -## 2.2.5 - * Update version of `requests` to `2.20.0` in response to CVE 2018-18074 - -## 2.2.4 - * Ensure that deal associations are being retrieved if `associations` are selected in the catalog [#79](https://github.com/singer-io/tap-hubspot/pull/79) - -## 2.2.3 - * Scrub the access token from error messages Hubspot returns when there are insufficient permissions [#75](https://github.com/singer-io/tap-hubspot/pull/75) - -## 2.2.2 - * Fix a bug with the 'engagements' stream which requires the 'engagement' field to have automatic inclusion [#74](https://github.com/singer-io/tap-hubspot/pull/74) - -## 2.2.1 - * Fix a bug with the 'inclusion' metadata for replication_key fields [#72](https://github.com/singer-io/tap-hubspot/pull/72) - -## 2.2.0 - * Adds property selection to the tap [#67](https://github.com/singer-io/tap-hubspot/pull/67) - * Removed the keywords stream as it is deprecated 
[#68](https://github.com/singer-io/tap-hubspot/pull/68) - * Schema updates [#69](https://github.com/singer-io/tap-hubspot/pull/69) [#70](https://github.com/singer-io/tap-hubspot/pull/70) diff --git a/archive/LICENSE b/archive/LICENSE deleted file mode 100644 index 627c3e9..0000000 --- a/archive/LICENSE +++ /dev/null @@ -1,620 +0,0 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". 
"Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. 
This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. 
Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. 
-Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). 
- - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". 
- - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. 
- - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. - - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - \ No newline at end of file diff --git a/archive/MANIFEST.in b/archive/MANIFEST.in deleted file mode 100644 index be81b9f..0000000 --- a/archive/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include LICENSE -include tap_hubspot/schemas/*.json diff --git a/archive/README.md b/archive/README.md deleted file mode 100644 index 77c99ac..0000000 --- a/archive/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# tap-hubspot - -This is a [Singer](https://singer.io) tap that produces JSON-formatted data following the [Singer spec](https://github.com/singer-io/getting-started/blob/master/SPEC.md). 
- -This tap: -- Pulls raw data from HubSpot's [REST API](http://developers.hubspot.com/docs/overview) -- Extracts the following resources from HubSpot - - [Campaigns](http://developers.hubspot.com/docs/methods/email/get_campaign_data) - - [Companies](http://developers.hubspot.com/docs/methods/companies/get_company) - - [Contacts](https://developers.hubspot.com/docs/methods/contacts/get_contacts) - - [Contact Lists](http://developers.hubspot.com/docs/methods/lists/get_lists) - - [Deals](http://developers.hubspot.com/docs/methods/deals/get_deals_modified) - - [Deal Pipelines](https://developers.hubspot.com/docs/methods/deal-pipelines/get-all-deal-pipelines) - - [Email Events](http://developers.hubspot.com/docs/methods/email/get_events) - - [Engagements](https://developers.hubspot.com/docs/methods/engagements/get-all-engagements) - - [Forms](http://developers.hubspot.com/docs/methods/forms/v2/get_forms) - - [Keywords](http://developers.hubspot.com/docs/methods/keywords/get_keywords) - - [Owners](http://developers.hubspot.com/docs/methods/owners/get_owners) - - [Subscription Changes](http://developers.hubspot.com/docs/methods/email/get_subscriptions_timeline) - - [Workflows](http://developers.hubspot.com/docs/methods/workflows/v3/get_workflows) - - [Tickets](https://developers.hubspot.com/docs/api/crm/tickets) -- Outputs the schema for each resource -- Incrementally pulls data based on the input state - -## Configuration - -This tap requires a `config.json` which specifies details regarding [OAuth 2.0](https://developers.hubspot.com/docs/methods/oauth2/oauth2-overview) authentication, a cutoff date for syncing historical data, an optional parameter request_timeout for which request should wait to get the response and an optional flag which controls collection of anonymous usage metrics. See [config.sample.json](config.sample.json) for an example. You may specify an API key instead of OAuth parameters for development purposes, as detailed below. - -To run `tap-hubspot` with the configuration file, use this command: - -```bash -› tap-hubspot -c my-config.json -``` - - -## API Key Authentication (for development) - -As an alternative to OAuth 2.0 authentication during development, you may specify an API key (`HAPIKEY`) to authenticate with the HubSpot API. This should be used only for low-volume development work, as the [HubSpot API Usage Guidelines](https://developers.hubspot.com/apps/api_guidelines) specify that integrations should use OAuth for authentication. - -To use an API key, include a `hapikey` configuration variable in your `config.json` and set it to the value of your HubSpot API key. Any OAuth authentication parameters in your `config.json` **will be ignored** if this key is present! 
- ---- - -Copyright © 2017 Stitch diff --git a/archive/bin/run-a-test.sh b/archive/bin/run-a-test.sh deleted file mode 100755 index fc7a032..0000000 --- a/archive/bin/run-a-test.sh +++ /dev/null @@ -1,5 +0,0 @@ -set -exu -TEST_FILE=$1 -TEST_CLASS=$2 -TEST_NAME=$3 -nosetests tap_hubspot/tests/$TEST_FILE:$TEST_CLASS.$TEST_NAME diff --git a/archive/bin/run-all-tests.sh b/archive/bin/run-all-tests.sh deleted file mode 100755 index 9fa1873..0000000 --- a/archive/bin/run-all-tests.sh +++ /dev/null @@ -1 +0,0 @@ -nosetests tap_hubspot/tests/ diff --git a/archive/config.sample.json b/archive/config.sample.json deleted file mode 100644 index 0a93849..0000000 --- a/archive/config.sample.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "redirect_uri": "https://api.hubspot.com/", - "client_id": 123456789000, - "client_secret": "my_secret", - "refresh_token": "my_token", - "start_date": "2017-01-01T00:00:00Z", - "request_timeout": 300, - "disable_collection": false -} diff --git a/archive/setup.cfg b/archive/setup.cfg deleted file mode 100644 index b88034e..0000000 --- a/archive/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[metadata] -description-file = README.md diff --git a/archive/setup.py b/archive/setup.py deleted file mode 100644 index 8b8be70..0000000 --- a/archive/setup.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python - -from setuptools import setup - -setup(name='tap-hubspot', - version='2.12.1', - description='Singer.io tap for extracting data from the HubSpot API', - author='Stitch', - url='http://singer.io', - classifiers=['Programming Language :: Python :: 3 :: Only'], - py_modules=['tap_hubspot'], - install_requires=[ - 'attrs==16.3.0', - 'singer-python==5.13.0', - 'requests==2.20.0', - 'backoff==1.8.0', - 'requests_mock==1.3.0', - ], - extras_require= { - 'dev': [ - 'pylint==2.5.3', - 'nose==1.3.7', - ] - }, - entry_points=''' - [console_scripts] - tap-hubspot=tap_hubspot:main - ''', - packages=['tap_hubspot'], - package_data = { - 'tap_hubspot/schemas': [ - "campaigns.json", - "companies.json", - "contact_lists.json", - "contacts.json", - "deals.json", - "email_events.json", - "forms.json", - "keywords.json", - "owners.json", - "subscription_changes.json", - "workflows.json", - ], - }, - include_package_data=True, -) diff --git a/archive/tap_hubspot/__init__.py b/archive/tap_hubspot/__init__.py deleted file mode 100644 index bd23ff4..0000000 --- a/archive/tap_hubspot/__init__.py +++ /dev/null @@ -1,1306 +0,0 @@ -#!/usr/bin/env python3 -import datetime -import pytz -import itertools -import os -import re -import sys -import json -# pylint: disable=import-error -import attr -import backoff -import requests -import singer -import singer.messages -from singer import metrics -from singer import metadata -from singer import utils -from singer import (transform, - UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING, - Transformer, _transform_datetime) - -LOGGER = singer.get_logger() -SESSION = requests.Session() - -REQUEST_TIMEOUT = 300 -class InvalidAuthException(Exception): - pass - -class SourceUnavailableException(Exception): - pass - -class DependencyException(Exception): - pass - -class UriTooLongException(Exception): - pass - -class DataFields: - offset = 'offset' - -class StateFields: - offset = 'offset' - this_stream = 'this_stream' - -BASE_URL = "https://api.hubapi.com" - -CONTACTS_BY_COMPANY = "contacts_by_company" - -DEFAULT_CHUNK_SIZE = 1000 * 60 * 60 * 24 - -V3_PREFIXES = {'hs_date_entered', 'hs_date_exited', 'hs_time_in'} - -CONFIG = { - "access_token": None, - "token_expires": None, - 
"email_chunk_size": DEFAULT_CHUNK_SIZE, - "subscription_chunk_size": DEFAULT_CHUNK_SIZE, - - # in config.json - "redirect_uri": None, - "client_id": None, - "client_secret": None, - "refresh_token": None, - "start_date": None, - "hapikey": None, - "include_inactives": None, -} - -ENDPOINTS = { - "contacts_properties": "/properties/v1/contacts/properties", - "contacts_all": "/contacts/v1/lists/all/contacts/all", - "contacts_recent": "/contacts/v1/lists/recently_updated/contacts/recent", - "contacts_detail": "/contacts/v1/contact/vids/batch/", - - "companies_properties": "/companies/v2/properties", - "companies_all": "/companies/v2/companies/paged", - "companies_recent": "/companies/v2/companies/recent/modified", - "companies_detail": "/companies/v2/companies/{company_id}", - "contacts_by_company": "/companies/v2/companies/{company_id}/vids", - - "deals_properties": "/properties/v1/deals/properties", - "deals_all": "/deals/v1/deal/paged", - "deals_recent": "/deals/v1/deal/recent/modified", - "deals_detail": "/deals/v1/deal/{deal_id}", - - "deals_v3_batch_read": "/crm/v3/objects/deals/batch/read", - "deals_v3_properties": "/crm/v3/properties/deals", - - "deal_pipelines": "/deals/v1/pipelines", - - "campaigns_all": "/email/public/v1/campaigns/by-id", - "campaigns_detail": "/email/public/v1/campaigns/{campaign_id}", - - "engagements_all": "/engagements/v1/engagements/paged", - - "subscription_changes": "/email/public/v1/subscriptions/timeline", - "email_events": "/email/public/v1/events", - "contact_lists": "/contacts/v1/lists", - "forms": "/forms/v2/forms", - "workflows": "/automation/v3/workflows", - "owners": "/owners/v2/owners", - - "tickets_properties": "/crm/v3/properties/tickets", - "tickets": "/crm/v4/objects/tickets", -} - -def get_start(state, tap_stream_id, bookmark_key, older_bookmark_key=None): - """ - If the current bookmark_key is available in the state, then return the bookmark_key value. - If it is not available then check and return the older_bookmark_key in the state for the existing connection. - If none of the keys are available in the state for a particular stream, then return start_date. - - We have made this change because of an update in the replication key of the deals stream. - So, if any existing connections have only older_bookmark_key in the state then tap should utilize that bookmark value. - Then next time, the tap should use the current bookmark value. - """ - current_bookmark = singer.get_bookmark(state, tap_stream_id, bookmark_key) - if current_bookmark is None: - if older_bookmark_key: - previous_bookmark = singer.get_bookmark(state, tap_stream_id, older_bookmark_key) - if previous_bookmark: - return previous_bookmark - - return CONFIG['start_date'] - return current_bookmark - -def get_current_sync_start(state, tap_stream_id): - current_sync_start_value = singer.get_bookmark(state, tap_stream_id, "current_sync_start") - if current_sync_start_value is None: - return current_sync_start_value - return utils.strptime_to_utc(current_sync_start_value) - -def write_current_sync_start(state, tap_stream_id, start): - value = start - if start is not None: - value = utils.strftime(start) - return singer.write_bookmark(state, tap_stream_id, "current_sync_start", value) - -def clean_state(state): - """ Clear deprecated keys out of state. 
""" - for stream, bookmark_map in state.get("bookmarks", {}).items(): - if "last_sync_duration" in bookmark_map: - LOGGER.info("%s - Removing last_sync_duration from state.", stream) - state["bookmarks"][stream].pop("last_sync_duration", None) - -def get_selected_property_fields(catalog, mdata): - - fields = catalog.get("schema").get("properties").keys() - property_field_names = [] - for field in fields: - if "property_" in field: - field_metadata = mdata.get(('properties', field)) - if utils.should_sync_field(field_metadata.get('inclusion'), - field_metadata.get('selected')): - property_field_names.append(field.split("property_", 1)[1]) - return ",".join(property_field_names) - -def get_url(endpoint, **kwargs): - if endpoint not in ENDPOINTS: - raise ValueError("Invalid endpoint {}".format(endpoint)) - - return BASE_URL + ENDPOINTS[endpoint].format(**kwargs) - - -def get_field_type_schema(field_type): - if field_type == "bool": - return {"type": ["null", "boolean"]} - - elif field_type == "datetime": - return {"type": ["null", "string"], - "format": "date-time"} - - elif field_type == "number": - # A value like 'N/A' can be returned for this type, - # so we have to let this be a string sometimes - return {"type": ["null", "number", "string"]} - - else: - return {"type": ["null", "string"]} - -def get_field_schema(field_type, extras=False): - if extras: - return { - "type": "object", - "properties": { - "value": get_field_type_schema(field_type), - "timestamp": get_field_type_schema("datetime"), - "source": get_field_type_schema("string"), - "sourceId": get_field_type_schema("string"), - } - } - else: - return { - "type": "object", - "properties": { - "value": get_field_type_schema(field_type), - } - } - -def parse_custom_schema(entity_name, data): - if entity_name == "tickets": - return { - field['name']: get_field_type_schema(field['type']) - for field in data["results"] - } - - return { - field['name']: get_field_schema(field['type'], entity_name != 'contacts') - for field in data - } - - -def get_custom_schema(entity_name): - return parse_custom_schema(entity_name, request(get_url(entity_name + "_properties")).json()) - -def get_v3_schema(entity_name): - url = get_url("deals_v3_properties") - return parse_custom_schema(entity_name, request(url).json()['results']) - -def get_abs_path(path): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), path) - -def load_associated_company_schema(): - associated_company_schema = load_schema("companies") - #pylint: disable=line-too-long - associated_company_schema['properties']['company-id'] = associated_company_schema['properties'].pop('companyId') - associated_company_schema['properties']['portal-id'] = associated_company_schema['properties'].pop('portalId') - return associated_company_schema - -def load_schema(entity_name): - schema = utils.load_json(get_abs_path('schemas/{}.json'.format(entity_name))) - if entity_name in ["contacts", "companies", "deals", "tickets"]: - custom_schema = get_custom_schema(entity_name) - - schema['properties']['properties'] = { - "type": "object", - "properties": custom_schema, - } - - if entity_name in ["deals"]: - v3_schema = get_v3_schema(entity_name) - for key, value in v3_schema.items(): - if any(prefix in key for prefix in V3_PREFIXES): - custom_schema[key] = value - - # Move properties to top level - custom_schema_top_level = {'property_{}'.format(k): v for k, v in custom_schema.items()} - schema['properties'].update(custom_schema_top_level) - - # Exclude properties_versions field for tickets 
stream. As the versions are not present in - # the api response. - if entity_name != "tickets": - # Make properties_versions selectable and share the same schema. - versions_schema = utils.load_json(get_abs_path('schemas/versions.json')) - schema['properties']['properties_versions'] = versions_schema - - if entity_name == "contacts": - schema['properties']['associated-company'] = load_associated_company_schema() - - return schema - -#pylint: disable=invalid-name -def acquire_access_token_from_refresh_token(): - payload = { - "grant_type": "refresh_token", - "redirect_uri": CONFIG['redirect_uri'], - "refresh_token": CONFIG['refresh_token'], - "client_id": CONFIG['client_id'], - "client_secret": CONFIG['client_secret'], - } - - - resp = requests.post(BASE_URL + "/oauth/v1/token", data=payload, timeout=get_request_timeout()) - if resp.status_code == 403: - raise InvalidAuthException(resp.content) - - resp.raise_for_status() - auth = resp.json() - CONFIG['access_token'] = auth['access_token'] - CONFIG['refresh_token'] = auth['refresh_token'] - CONFIG['token_expires'] = ( - datetime.datetime.utcnow() + - datetime.timedelta(seconds=auth['expires_in'] - 600)) - LOGGER.info("Token refreshed. Expires at %s", CONFIG['token_expires']) - - -def giveup(exc): - return exc.response is not None \ - and 400 <= exc.response.status_code < 500 \ - and exc.response.status_code != 429 - -def on_giveup(details): - if len(details['args']) == 2: - url, params = details['args'] - else: - url = details['args'] - params = {} - - raise Exception("Giving up on request after {} tries with url {} and params {}" \ - .format(details['tries'], url, params)) - -URL_SOURCE_RE = re.compile(BASE_URL + r'/(\w+)/') - -def parse_source_from_url(url): - match = URL_SOURCE_RE.match(url) - if match: - return match.group(1) - return None - -def get_params_and_headers(params): - """ - This function makes a params object and headers object based on the - authentication values available. If there is an `hapikey` in the config, we - need that in `params` and not in the `headers`. 
Otherwise, we need to get an - `access_token` to put in the `headers` and not in the `params` - """ - params = params or {} - hapikey = CONFIG['hapikey'] - if hapikey is None: - if CONFIG['token_expires'] is None or CONFIG['token_expires'] < datetime.datetime.utcnow(): - acquire_access_token_from_refresh_token() - headers = {'Authorization': 'Bearer {}'.format(CONFIG['access_token'])} - else: - params['hapikey'] = hapikey - headers = {} - - if 'user_agent' in CONFIG: - headers['User-Agent'] = CONFIG['user_agent'] - - return params, headers - - -# backoff for Timeout error is already included in "requests.exceptions.RequestException" -# as it is a parent class of "Timeout" error -@backoff.on_exception(backoff.constant, - (requests.exceptions.RequestException, - requests.exceptions.HTTPError), - max_tries=5, - jitter=None, - giveup=giveup, - on_giveup=on_giveup, - interval=10) -def request(url, params=None): - - params, headers = get_params_and_headers(params) - - req = requests.Request('GET', url, params=params, headers=headers).prepare() - LOGGER.info("GET %s", req.url) - with metrics.http_request_timer(parse_source_from_url(url)) as timer: - resp = SESSION.send(req, timeout=get_request_timeout()) - timer.tags[metrics.Tag.http_status_code] = resp.status_code - if resp.status_code == 403: - raise SourceUnavailableException(resp.content) - elif resp.status_code == 414: - raise UriTooLongException(resp.content) - resp.raise_for_status() - - return resp -# {"bookmarks" : {"contacts" : { "lastmodifieddate" : "2001-01-01" -# "offset" : {"vidOffset": 1234 -# "timeOffset": "3434434 }} -# "users" : { "timestamp" : "2001-01-01"}} -# "currently_syncing" : "contacts" -# } -# } - -def lift_properties_and_versions(record): - for key, value in record.get('properties', {}).items(): - computed_key = "property_{}".format(key) - record[computed_key] = value - if isinstance(value, dict): - versions = value.get('versions') - if versions: - if not record.get('properties_versions'): - record['properties_versions'] = [] - record['properties_versions'] += versions - return record - -# backoff for Timeout error is already included in "requests.exceptions.RequestException" -# as it is a parent class of "Timeout" error -@backoff.on_exception(backoff.constant, - (requests.exceptions.RequestException, - requests.exceptions.HTTPError), - max_tries=5, - jitter=None, - giveup=giveup, - on_giveup=on_giveup, - interval=10) -def post_search_endpoint(url, data, params=None): - - params, headers = get_params_and_headers(params) - headers['content-type'] = "application/json" - - with metrics.http_request_timer(url) as _: - resp = requests.post( - url=url, - json=data, - params=params, - timeout=get_request_timeout(), - headers=headers - ) - - resp.raise_for_status() - - return resp - -def merge_responses(v1_data, v3_data): - for v1_record in v1_data: - v1_id = v1_record.get('dealId') - for v3_record in v3_data: - v3_id = v3_record.get('id') - if str(v1_id) == v3_id: - v1_record['properties'] = {**v1_record['properties'], - **v3_record['properties']} - -def process_v3_deals_records(v3_data): - """ - This function: - 1. filters out fields that don't contain 'hs_date_entered_*' and - 'hs_date_exited_*' - 2. 
changes a key value pair in `properties` to a key paired to an - object with a key 'value' and the original value - """ - transformed_v3_data = [] - for record in v3_data: - new_properties = {field_name : {'value': field_value} - for field_name, field_value in record['properties'].items() - if any(prefix in field_name for prefix in V3_PREFIXES)} - transformed_v3_data.append({**record, 'properties' : new_properties}) - return transformed_v3_data - -def get_v3_deals(v3_fields, v1_data): - v1_ids = [{'id': str(record['dealId'])} for record in v1_data] - - # Sending the first v3_field is enough to get them all - v3_body = {'inputs': v1_ids, - 'properties': [v3_fields[0]],} - v3_url = get_url('deals_v3_batch_read') - v3_resp = post_search_endpoint(v3_url, v3_body) - return v3_resp.json()['results'] - -#pylint: disable=line-too-long -def gen_request(STATE, tap_stream_id, url, params, path, more_key, offset_keys, offset_targets, v3_fields=None): - if len(offset_keys) != len(offset_targets): - raise ValueError("Number of offset_keys must match number of offset_targets") - - if singer.get_offset(STATE, tap_stream_id): - params.update(singer.get_offset(STATE, tap_stream_id)) - - with metrics.record_counter(tap_stream_id) as counter: - while True: - data = request(url, params).json() - - if data.get(path) is None: - raise RuntimeError("Unexpected API response: {} not in {}".format(path, data.keys())) - - if v3_fields: - v3_data = get_v3_deals(v3_fields, data[path]) - - # The shape of v3_data is different than the V1 response, - # so we transform v3 to look like v1 - transformed_v3_data = process_v3_deals_records(v3_data) - merge_responses(data[path], transformed_v3_data) - - for row in data[path]: - counter.increment() - yield row - - if not data.get(more_key, False): - break - - STATE = singer.clear_offset(STATE, tap_stream_id) - for key, target in zip(offset_keys, offset_targets): - if key in data: - params[target] = data[key] - STATE = singer.set_offset(STATE, tap_stream_id, target, data[key]) - - singer.write_state(STATE) - - STATE = singer.clear_offset(STATE, tap_stream_id) - singer.write_state(STATE) - - -def _sync_contact_vids(catalog, vids, schema, bumble_bee, bookmark_values, bookmark_key): - if len(vids) == 0: - return - - data = request(get_url("contacts_detail"), params={'vid': vids, 'showListMemberships' : True, "formSubmissionMode" : "all"}).json() - time_extracted = utils.now() - mdata = metadata.to_map(catalog.get('metadata')) - for record in data.values(): - # Explicitly add the bookmark field "versionTimestamp" and its value in the record. 
- record[bookmark_key] = bookmark_values.get(record.get("vid")) - record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) - singer.write_record("contacts", record, catalog.get('stream_alias'), time_extracted=time_extracted) - -default_contact_params = { - 'showListMemberships': True, - 'includeVersion': True, - 'count': 100, -} - -def sync_contacts(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - bookmark_key = 'versionTimestamp' - start = utils.strptime_with_tz(get_start(STATE, "contacts", bookmark_key)) - LOGGER.info("sync_contacts from %s", start) - - max_bk_value = start - schema = load_schema("contacts") - - singer.write_schema("contacts", schema, ["vid"], [bookmark_key], catalog.get('stream_alias')) - - url = get_url("contacts_all") - - vids = [] - # Dict to store replication key value for each contact record - bookmark_values = {} - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - # To handle records updated between start of the table sync and the end, - # store the current sync start in the state and not move the bookmark past this value. - sync_start_time = utils.now() - for row in gen_request(STATE, 'contacts', url, default_contact_params, 'contacts', 'has-more', ['vid-offset'], ['vidOffset']): - modified_time = None - if bookmark_key in row: - modified_time = utils.strptime_with_tz( - _transform_datetime( # pylint: disable=protected-access - row[bookmark_key], - UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING)) - - if not modified_time or modified_time >= start: - vids.append(row['vid']) - # Adding replication key value in `bookmark_values` dict - # Here, key is vid(primary key) and value is replication key value. - bookmark_values[row['vid']] = utils.strftime(modified_time) - - if modified_time and modified_time >= max_bk_value: - max_bk_value = modified_time - - if len(vids) == 100: - _sync_contact_vids(catalog, vids, schema, bumble_bee, bookmark_values, bookmark_key) - vids = [] - - _sync_contact_vids(catalog, vids, schema, bumble_bee, bookmark_values, bookmark_key) - - # Don't bookmark past the start of this sync to account for updated records during the sync. - new_bookmark = min(max_bk_value, sync_start_time) - STATE = singer.write_bookmark(STATE, 'contacts', bookmark_key, utils.strftime(new_bookmark)) - singer.write_state(STATE) - return STATE - -class ValidationPredFailed(Exception): - pass - -# companies_recent only supports 10,000 results. 
If there are more than this, -# we'll need to use the companies_all endpoint -def use_recent_companies_endpoint(response): - return response["total"] < 10000 - -default_contacts_by_company_params = {'count' : 100} - -# NB> to do: support stream aliasing and field selection -def _sync_contacts_by_company(STATE, ctx, company_id): - schema = load_schema(CONTACTS_BY_COMPANY) - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - url = get_url("contacts_by_company", company_id=company_id) - path = 'vids' - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - with metrics.record_counter(CONTACTS_BY_COMPANY) as counter: - data = request(url, default_contacts_by_company_params).json() - - if data.get(path) is None: - raise RuntimeError("Unexpected API response: {} not in {}".format(path, data.keys())) - - for row in data[path]: - counter.increment() - record = {'company-id' : company_id, - 'contact-id' : row} - record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) - singer.write_record("contacts_by_company", record, time_extracted=utils.now()) - - return STATE - -default_company_params = { - 'limit': 250, 'properties': ["createdate", "hs_lastmodifieddate"] -} - -def sync_companies(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - bumble_bee = Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) - bookmark_key = 'property_hs_lastmodifieddate' - bookmark_field_in_record = 'hs_lastmodifieddate' - - start = utils.strptime_to_utc(get_start(STATE, "companies", bookmark_key, older_bookmark_key=bookmark_field_in_record)) - LOGGER.info("sync_companies from %s", start) - schema = load_schema('companies') - singer.write_schema("companies", schema, ["companyId"], [bookmark_key], catalog.get('stream_alias')) - - # Because this stream doesn't query by `lastUpdated`, it cycles - # through the data set every time. The issue with this is that there - # is a race condition by which records may be updated between the - # start of this table's sync and the end, causing some updates to not - # be captured, in order to combat this, we must store the current - # sync's start in the state and not move the bookmark past this value. 
- current_sync_start = get_current_sync_start(STATE, "companies") or utils.now() - STATE = write_current_sync_start(STATE, "companies", current_sync_start) - singer.write_state(STATE) - - url = get_url("companies_all") - max_bk_value = start - if CONTACTS_BY_COMPANY in ctx.selected_stream_ids: - contacts_by_company_schema = load_schema(CONTACTS_BY_COMPANY) - singer.write_schema("contacts_by_company", contacts_by_company_schema, ["company-id", "contact-id"]) - - with bumble_bee: - for row in gen_request(STATE, 'companies', url, default_company_params, 'companies', 'has-more', ['offset'], ['offset']): - row_properties = row['properties'] - modified_time = None - if bookmark_field_in_record in row_properties: - # Hubspot returns timestamps in millis - timestamp_millis = row_properties[bookmark_field_in_record]['timestamp'] / 1000.0 - modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) - elif 'createdate' in row_properties: - # Hubspot returns timestamps in millis - timestamp_millis = row_properties['createdate']['timestamp'] / 1000.0 - modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) - - if modified_time and modified_time >= max_bk_value: - max_bk_value = modified_time - - if not modified_time or modified_time >= start: - record = request(get_url("companies_detail", company_id=row['companyId'])).json() - record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) - singer.write_record("companies", record, catalog.get('stream_alias'), time_extracted=utils.now()) - if CONTACTS_BY_COMPANY in ctx.selected_stream_ids: - STATE = _sync_contacts_by_company(STATE, ctx, record['companyId']) - - # Don't bookmark past the start of this sync to account for updated records during the sync. - new_bookmark = min(max_bk_value, current_sync_start) - STATE = singer.write_bookmark(STATE, 'companies', bookmark_key, utils.strftime(new_bookmark)) - STATE = write_current_sync_start(STATE, 'companies', None) - singer.write_state(STATE) - return STATE - -def has_selected_custom_field(mdata): - top_level_custom_props = [x for x in mdata if len(x) == 2 and 'property_' in x[1]] - for prop in top_level_custom_props: - # Return 'True' if the custom field is automatic. - if (mdata.get(prop, {}).get('selected') is True) or (mdata.get(prop, {}).get('inclusion') == "automatic"): - return True - return False - -def sync_deals(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - bookmark_key = 'property_hs_lastmodifieddate' - # The Bookmark field('hs_lastmodifieddate') available in the record is different from - # the tap's bookmark key(property_hs_lastmodifieddate). - # `hs_lastmodifieddate` is available in the properties field at the nested level. - # As `hs_lastmodifieddate` is not available at the 1st level it can not be marked as automatic inclusion. - # tap includes all nested fields of the properties field as custom fields in the schema by appending the - # prefix `property_` along with each field. - # That's why bookmark_key is `property_hs_lastmodifieddate` so that we can mark it as automatic inclusion. - - last_modified_date = 'hs_lastmodifieddate' - - # Tap was used to write bookmark using replication key `hs_lastmodifieddate`. - # Now, as the replication key gets changed to "property_hs_lastmodifieddate", `get_start` function would return - # bookmark value of older bookmark key(`hs_lastmodifieddate`) if it is available. 
- # So, here `older_bookmark_key` is the previous bookmark key that may be available in the state of - # the existing connection. - - start = utils.strptime_with_tz(get_start(STATE, "deals", bookmark_key, older_bookmark_key=last_modified_date)) - max_bk_value = start - LOGGER.info("sync_deals from %s", start) - params = {'limit': 100, - 'includeAssociations': False, - 'properties' : []} - - schema = load_schema("deals") - singer.write_schema("deals", schema, ["dealId"], [bookmark_key], catalog.get('stream_alias')) - - # Check if we should include associations - for key in mdata.keys(): - if 'associations' in key: - assoc_mdata = mdata.get(key) - if (assoc_mdata.get('selected') and assoc_mdata.get('selected') is True): - params['includeAssociations'] = True - - v3_fields = None - has_selected_properties = mdata.get(('properties', 'properties'), {}).get('selected') - if has_selected_properties or has_selected_custom_field(mdata): - # On 2/12/20, hubspot added a lot of additional properties for - # deals, and appending all of them to requests ended up leading to - # 414 (url-too-long) errors. Hubspot recommended we use the - # `includeAllProperties` and `allpropertiesFetchMode` params - # instead. - params['includeAllProperties'] = True - params['allPropertiesFetchMode'] = 'latest_version' - - # Grab selected `hs_date_entered/exited` fields to call the v3 endpoint with - v3_fields = [breadcrumb[1].replace('property_', '') - for breadcrumb, mdata_map in mdata.items() - if breadcrumb - and (mdata_map.get('selected') is True or has_selected_properties) - and any(prefix in breadcrumb[1] for prefix in V3_PREFIXES)] - - url = get_url('deals_all') - - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - # To handle records updated between start of the table sync and the end, - # store the current sync start in the state and not move the bookmark past this value. - sync_start_time = utils.now() - for row in gen_request(STATE, 'deals', url, params, 'deals', "hasMore", ["offset"], ["offset"], v3_fields=v3_fields): - row_properties = row['properties'] - modified_time = None - if last_modified_date in row_properties: - # Hubspot returns timestamps in millis - timestamp_millis = row_properties[last_modified_date]['timestamp'] / 1000.0 - modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) - elif 'createdate' in row_properties: - # Hubspot returns timestamps in millis - timestamp_millis = row_properties['createdate']['timestamp'] / 1000.0 - modified_time = datetime.datetime.fromtimestamp(timestamp_millis, datetime.timezone.utc) - if modified_time and modified_time >= max_bk_value: - max_bk_value = modified_time - - if not modified_time or modified_time >= start: - record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) - singer.write_record("deals", record, catalog.get('stream_alias'), time_extracted=utils.now()) - - # Don't bookmark past the start of this sync to account for updated records during the sync. 
- new_bookmark = min(max_bk_value, sync_start_time) - STATE = singer.write_bookmark(STATE, 'deals', bookmark_key, utils.strftime(new_bookmark)) - singer.write_state(STATE) - return STATE - - -def gen_request_tickets(tap_stream_id, url, params, path, more_key): - """ - Cursor-based API Pagination : Used in tickets stream implementation - """ - with metrics.record_counter(tap_stream_id) as counter: - while True: - data = request(url, params).json() - - if data.get(path) is None: - raise RuntimeError( - "Unexpected API response: {} not in {}".format(path, data.keys())) - - for row in data[path]: - counter.increment() - yield row - - if not data.get(more_key): - break - params['after'] = data.get(more_key).get('next').get('after') - -def sync_tickets(STATE, ctx): - """ - Function to sync `tickets` stream records - """ - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - stream_id = "tickets" - primary_key = "id" - bookmark_key = "updatedAt" - - max_bk_value = bookmark_value = utils.strptime_with_tz( - get_start(STATE, stream_id, bookmark_key)) - LOGGER.info("sync_tickets from %s", bookmark_value) - - params = {'limit': 100, - 'associations': 'contact,company,deals', - 'properties': get_selected_property_fields(catalog, mdata), - 'archived': False - } - - schema = load_schema(stream_id) - singer.write_schema(stream_id, schema, [primary_key], - [bookmark_key], catalog.get('stream_alias')) - - url = get_url(stream_id) - - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as transformer: - # To handle records updated between start of the table sync and the end, - # store the current sync start in the state and not move the bookmark past this value. - sync_start_time = utils.now() - for row in gen_request_tickets(stream_id, url, params, 'results', "paging"): - # parsing the string formatted date to datetime object - modified_time = utils.strptime_to_utc(row[bookmark_key]) - - # Checking the bookmark value is present on the record and it - # is greater than or equal to defined previous bookmark value - if modified_time and modified_time >= bookmark_value: - # transforms the data and filters out the selected fields from the catalog - record = transformer.transform(lift_properties_and_versions(row), schema, mdata) - singer.write_record(stream_id, record, catalog.get( - 'stream_alias'), time_extracted=utils.now()) - if modified_time and modified_time >= max_bk_value: - max_bk_value = modified_time - - # Don't bookmark past the start of this sync to account for updated records during the sync. 
- new_bookmark = min(max_bk_value, sync_start_time) - STATE = singer.write_bookmark(STATE, stream_id, bookmark_key, utils.strftime(new_bookmark)) - singer.write_state(STATE) - return STATE - - -# NB> no suitable bookmark is available: https://developers.hubspot.com/docs/methods/email/get_campaigns_by_id -def sync_campaigns(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - schema = load_schema("campaigns") - singer.write_schema("campaigns", schema, ["id"], catalog.get('stream_alias')) - LOGGER.info("sync_campaigns(NO bookmarks)") - url = get_url("campaigns_all") - params = {'limit': 500} - - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - for row in gen_request(STATE, 'campaigns', url, params, "campaigns", "hasMore", ["offset"], ["offset"]): - record = request(get_url("campaigns_detail", campaign_id=row['id'])).json() - record = bumble_bee.transform(lift_properties_and_versions(record), schema, mdata) - singer.write_record("campaigns", record, catalog.get('stream_alias'), time_extracted=utils.now()) - - return STATE - - -def sync_entity_chunked(STATE, catalog, entity_name, key_properties, path): - schema = load_schema(entity_name) - bookmark_key = 'startTimestamp' - - singer.write_schema(entity_name, schema, key_properties, [bookmark_key], catalog.get('stream_alias')) - - start = get_start(STATE, entity_name, bookmark_key) - LOGGER.info("sync_%s from %s", entity_name, start) - - now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - now_ts = int(now.timestamp() * 1000) - - start_ts = int(utils.strptime_with_tz(start).timestamp() * 1000) - url = get_url(entity_name) - - mdata = metadata.to_map(catalog.get('metadata')) - - if entity_name == 'email_events': - window_size = int(CONFIG['email_chunk_size']) - elif entity_name == 'subscription_changes': - window_size = int(CONFIG['subscription_chunk_size']) - - with metrics.record_counter(entity_name) as counter: - while start_ts < now_ts: - end_ts = start_ts + window_size - params = { - 'startTimestamp': start_ts, - 'endTimestamp': end_ts, - 'limit': 1000, - } - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - while True: - our_offset = singer.get_offset(STATE, entity_name) - if bool(our_offset) and our_offset.get('offset') is not None: - params[StateFields.offset] = our_offset.get('offset') - - data = request(url, params).json() - time_extracted = utils.now() - - if data.get(path) is None: - raise RuntimeError("Unexpected API response: {} not in {}".format(path, data.keys())) - - for row in data[path]: - counter.increment() - record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) - singer.write_record(entity_name, - record, - catalog.get('stream_alias'), - time_extracted=time_extracted) - if data.get('hasMore'): - STATE = singer.set_offset(STATE, entity_name, 'offset', data['offset']) - singer.write_state(STATE) - else: - STATE = singer.clear_offset(STATE, entity_name) - singer.write_state(STATE) - break - STATE = singer.write_bookmark(STATE, entity_name, 'startTimestamp', utils.strftime(datetime.datetime.fromtimestamp((start_ts / 1000), datetime.timezone.utc))) # pylint: disable=line-too-long - singer.write_state(STATE) - start_ts = end_ts - - STATE = singer.clear_offset(STATE, entity_name) - singer.write_state(STATE) - return STATE - -def sync_subscription_changes(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - STATE = 
sync_entity_chunked(STATE, catalog, "subscription_changes", ["timestamp", "portalId", "recipient"], - "timeline") - return STATE - -def sync_email_events(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - STATE = sync_entity_chunked(STATE, catalog, "email_events", ["id"], "events") - return STATE - -def sync_contact_lists(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - schema = load_schema("contact_lists") - bookmark_key = 'updatedAt' - singer.write_schema("contact_lists", schema, ["listId"], [bookmark_key], catalog.get('stream_alias')) - - start = get_start(STATE, "contact_lists", bookmark_key) - max_bk_value = start - - LOGGER.info("sync_contact_lists from %s", start) - - url = get_url("contact_lists") - params = {'count': 250} - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - # To handle records updated between start of the table sync and the end, - # store the current sync start in the state and not move the bookmark past this value. - sync_start_time = utils.now() - for row in gen_request(STATE, 'contact_lists', url, params, "lists", "has-more", ["offset"], ["offset"]): - record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) - - if record[bookmark_key] >= start: - singer.write_record("contact_lists", record, catalog.get('stream_alias'), time_extracted=utils.now()) - if record[bookmark_key] >= max_bk_value: - max_bk_value = record[bookmark_key] - - # Don't bookmark past the start of this sync to account for updated records during the sync. - new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) - STATE = singer.write_bookmark(STATE, 'contact_lists', bookmark_key, utils.strftime(new_bookmark)) - singer.write_state(STATE) - - return STATE - -def sync_forms(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - schema = load_schema("forms") - bookmark_key = 'updatedAt' - - singer.write_schema("forms", schema, ["guid"], [bookmark_key], catalog.get('stream_alias')) - start = get_start(STATE, "forms", bookmark_key) - max_bk_value = start - - LOGGER.info("sync_forms from %s", start) - - data = request(get_url("forms")).json() - time_extracted = utils.now() - - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - # To handle records updated between start of the table sync and the end, - # store the current sync start in the state and not move the bookmark past this value. - sync_start_time = utils.now() - for row in data: - record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) - - if record[bookmark_key] >= start: - singer.write_record("forms", record, catalog.get('stream_alias'), time_extracted=time_extracted) - if record[bookmark_key] >= max_bk_value: - max_bk_value = record[bookmark_key] - - # Don't bookmark past the start of this sync to account for updated records during the sync. 
- new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) - STATE = singer.write_bookmark(STATE, 'forms', bookmark_key, utils.strftime(new_bookmark)) - singer.write_state(STATE) - - return STATE - -def sync_workflows(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - schema = load_schema("workflows") - bookmark_key = 'updatedAt' - singer.write_schema("workflows", schema, ["id"], [bookmark_key], catalog.get('stream_alias')) - start = get_start(STATE, "workflows", bookmark_key) - max_bk_value = start - - STATE = singer.write_bookmark(STATE, 'workflows', bookmark_key, max_bk_value) - singer.write_state(STATE) - - LOGGER.info("sync_workflows from %s", start) - - data = request(get_url("workflows")).json() - time_extracted = utils.now() - - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - # To handle records updated between start of the table sync and the end, - # store the current sync start in the state and not move the bookmark past this value. - sync_start_time = utils.now() - for row in data['workflows']: - record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) - if record[bookmark_key] >= start: - singer.write_record("workflows", record, catalog.get('stream_alias'), time_extracted=time_extracted) - if record[bookmark_key] >= max_bk_value: - max_bk_value = record[bookmark_key] - - # Don't bookmark past the start of this sync to account for updated records during the sync. - new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) - STATE = singer.write_bookmark(STATE, 'workflows', bookmark_key, utils.strftime(new_bookmark)) - singer.write_state(STATE) - return STATE - -def sync_owners(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - schema = load_schema("owners") - bookmark_key = 'updatedAt' - - singer.write_schema("owners", schema, ["ownerId"], [bookmark_key], catalog.get('stream_alias')) - start = get_start(STATE, "owners", bookmark_key) - max_bk_value = start - - LOGGER.info("sync_owners from %s", start) - - params = {} - if CONFIG.get('include_inactives'): - params['includeInactives'] = "true" - data = request(get_url("owners"), params).json() - - time_extracted = utils.now() - - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - # To handle records updated between start of the table sync and the end, - # store the current sync start in the state and not move the bookmark past this value. - sync_start_time = utils.now() - for row in data: - record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) - if record[bookmark_key] >= max_bk_value: - max_bk_value = record[bookmark_key] - - if record[bookmark_key] >= start: - singer.write_record("owners", record, catalog.get('stream_alias'), time_extracted=time_extracted) - - # Don't bookmark past the start of this sync to account for updated records during the sync. 
- new_bookmark = min(utils.strptime_to_utc(max_bk_value), sync_start_time) - STATE = singer.write_bookmark(STATE, 'owners', bookmark_key, utils.strftime(new_bookmark)) - singer.write_state(STATE) - return STATE - -def sync_engagements(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - schema = load_schema("engagements") - bookmark_key = 'lastUpdated' - singer.write_schema("engagements", schema, ["engagement_id"], [bookmark_key], catalog.get('stream_alias')) - start = get_start(STATE, "engagements", bookmark_key) - - # Because this stream doesn't query by `lastUpdated`, it cycles - # through the data set every time. The issue with this is that there - # is a race condition by which records may be updated between the - # start of this table's sync and the end, causing some updates to not - # be captured, in order to combat this, we must store the current - # sync's start in the state and not move the bookmark past this value. - current_sync_start = get_current_sync_start(STATE, "engagements") or utils.now() - STATE = write_current_sync_start(STATE, "engagements", current_sync_start) - singer.write_state(STATE) - - max_bk_value = start - LOGGER.info("sync_engagements from %s", start) - - STATE = singer.write_bookmark(STATE, 'engagements', bookmark_key, start) - singer.write_state(STATE) - - url = get_url("engagements_all") - params = {'limit': 250} - top_level_key = "results" - engagements = gen_request(STATE, 'engagements', url, params, top_level_key, "hasMore", ["offset"], ["offset"]) - - time_extracted = utils.now() - - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - for engagement in engagements: - record = bumble_bee.transform(lift_properties_and_versions(engagement), schema, mdata) - if record['engagement'][bookmark_key] >= start: - # hoist PK and bookmark field to top-level record - record['engagement_id'] = record['engagement']['id'] - record[bookmark_key] = record['engagement'][bookmark_key] - singer.write_record("engagements", record, catalog.get('stream_alias'), time_extracted=time_extracted) - if record['engagement'][bookmark_key] >= max_bk_value: - max_bk_value = record['engagement'][bookmark_key] - - # Don't bookmark past the start of this sync to account for updated records during the sync. 
- new_bookmark = min(utils.strptime_to_utc(max_bk_value), current_sync_start) - STATE = singer.write_bookmark(STATE, 'engagements', bookmark_key, utils.strftime(new_bookmark)) - STATE = write_current_sync_start(STATE, 'engagements', None) - singer.write_state(STATE) - return STATE - -def sync_deal_pipelines(STATE, ctx): - catalog = ctx.get_catalog_from_id(singer.get_currently_syncing(STATE)) - mdata = metadata.to_map(catalog.get('metadata')) - schema = load_schema('deal_pipelines') - singer.write_schema('deal_pipelines', schema, ['pipelineId'], catalog.get('stream_alias')) - LOGGER.info('sync_deal_pipelines') - data = request(get_url('deal_pipelines')).json() - with Transformer(UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING) as bumble_bee: - for row in data: - record = bumble_bee.transform(lift_properties_and_versions(row), schema, mdata) - singer.write_record("deal_pipelines", record, catalog.get('stream_alias'), time_extracted=utils.now()) - singer.write_state(STATE) - return STATE - -@attr.s -class Stream: - tap_stream_id = attr.ib() - sync = attr.ib() - key_properties = attr.ib() - replication_key = attr.ib() - replication_method = attr.ib() - -STREAMS = [ - # Do these first as they are incremental - Stream('subscription_changes', sync_subscription_changes, ['timestamp', 'portalId', 'recipient'], 'startTimestamp', 'INCREMENTAL'), - Stream('email_events', sync_email_events, ['id'], 'startTimestamp', 'INCREMENTAL'), - Stream('contacts', sync_contacts, ["vid"], 'versionTimestamp', 'INCREMENTAL'), - Stream('deals', sync_deals, ["dealId"], 'property_hs_lastmodifieddate', 'INCREMENTAL'), - Stream('companies', sync_companies, ["companyId"], 'property_hs_lastmodifieddate', 'INCREMENTAL'), - Stream('tickets', sync_tickets, ['id'], 'updatedAt', 'INCREMENTAL'), - - # Do these last as they are full table - Stream('forms', sync_forms, ['guid'], 'updatedAt', 'FULL_TABLE'), - Stream('workflows', sync_workflows, ['id'], 'updatedAt', 'FULL_TABLE'), - Stream('owners', sync_owners, ["ownerId"], 'updatedAt', 'FULL_TABLE'), - Stream('campaigns', sync_campaigns, ["id"], None, 'FULL_TABLE'), - Stream('contact_lists', sync_contact_lists, ["listId"], 'updatedAt', 'FULL_TABLE'), - Stream('deal_pipelines', sync_deal_pipelines, ['pipelineId'], None, 'FULL_TABLE'), - Stream('engagements', sync_engagements, ["engagement_id"], 'lastUpdated', 'FULL_TABLE') -] - -def get_streams_to_sync(streams, state): - target_stream = singer.get_currently_syncing(state) - result = streams - if target_stream: - skipped = list(itertools.takewhile( - lambda x: x.tap_stream_id != target_stream, streams)) - rest = list(itertools.dropwhile( - lambda x: x.tap_stream_id != target_stream, streams)) - result = rest + skipped # Move skipped streams to end - if not result: - raise Exception('Unknown stream {} in state'.format(target_stream)) - return result - -def get_selected_streams(remaining_streams, ctx): - selected_streams = [] - for stream in remaining_streams: - if stream.tap_stream_id in ctx.selected_stream_ids: - selected_streams.append(stream) - return selected_streams - -def do_sync(STATE, catalog): - # Clear out keys that are no longer used - clean_state(STATE) - - ctx = Context(catalog) - validate_dependencies(ctx) - - remaining_streams = get_streams_to_sync(STREAMS, STATE) - selected_streams = get_selected_streams(remaining_streams, ctx) - LOGGER.info('Starting sync. 
Will sync these streams: %s', - [stream.tap_stream_id for stream in selected_streams]) - for stream in selected_streams: - LOGGER.info('Syncing %s', stream.tap_stream_id) - STATE = singer.set_currently_syncing(STATE, stream.tap_stream_id) - singer.write_state(STATE) - - try: - STATE = stream.sync(STATE, ctx) # pylint: disable=not-callable - except SourceUnavailableException as ex: - error_message = str(ex).replace(CONFIG['access_token'], 10 * '*') - LOGGER.error(error_message) - except UriTooLongException as ex: - LOGGER.fatal(f"For stream - {stream.tap_stream_id}, please select fewer fields. " - f"The current selection exceeds Hubspot's maximum character allowance.") - raise ex - STATE = singer.set_currently_syncing(STATE, None) - singer.write_state(STATE) - LOGGER.info("Sync completed") - -class Context: - def __init__(self, catalog): - self.selected_stream_ids = set() - - for stream in catalog.get('streams'): - mdata = metadata.to_map(stream['metadata']) - if metadata.get(mdata, (), 'selected'): - self.selected_stream_ids.add(stream['tap_stream_id']) - - self.catalog = catalog - - def get_catalog_from_id(self, tap_stream_id): - return [c for c in self.catalog.get('streams') if c.get('stream') == tap_stream_id][0] - -# stream a is dependent on stream STREAM_DEPENDENCIES[a] -STREAM_DEPENDENCIES = { - CONTACTS_BY_COMPANY: 'companies' -} - -def validate_dependencies(ctx): - errs = [] - msg_tmpl = ("Unable to extract {0} data. " - "To receive {0} data, you also need to select {1}.") - - for k, v in STREAM_DEPENDENCIES.items(): - if k in ctx.selected_stream_ids and v not in ctx.selected_stream_ids: - errs.append(msg_tmpl.format(k, v)) - if errs: - raise DependencyException(" ".join(errs)) - -def load_discovered_schema(stream): - schema = load_schema(stream.tap_stream_id) - mdata = metadata.new() - - mdata = metadata.write(mdata, (), 'table-key-properties', stream.key_properties) - mdata = metadata.write(mdata, (), 'forced-replication-method', stream.replication_method) - - if stream.replication_key: - mdata = metadata.write(mdata, (), 'valid-replication-keys', [stream.replication_key]) - - for field_name in schema['properties'].keys(): - if field_name in stream.key_properties or field_name == stream.replication_key: - mdata = metadata.write(mdata, ('properties', field_name), 'inclusion', 'automatic') - else: - mdata = metadata.write(mdata, ('properties', field_name), 'inclusion', 'available') - - # The engagements stream has nested data that we synthesize; The engagement field needs to be automatic - if stream.tap_stream_id == "engagements": - mdata = metadata.write(mdata, ('properties', 'engagement'), 'inclusion', 'automatic') - mdata = metadata.write(mdata, ('properties', 'lastUpdated'), 'inclusion', 'automatic') - - return schema, metadata.to_list(mdata) - -def discover_schemas(): - result = {'streams': []} - for stream in STREAMS: - LOGGER.info('Loading schema for %s', stream.tap_stream_id) - try: - schema, mdata = load_discovered_schema(stream) - result['streams'].append({'stream': stream.tap_stream_id, - 'tap_stream_id': stream.tap_stream_id, - 'schema': schema, - 'metadata': mdata}) - except SourceUnavailableException as ex: - # Skip the discovery mode on the streams were the required scopes are missing - warning_message = str(ex).replace(CONFIG['access_token'], 10 * '*') - LOGGER.warning(warning_message) - # Load the contacts_by_company schema - LOGGER.info('Loading schema for contacts_by_company') - contacts_by_company = Stream('contacts_by_company', _sync_contacts_by_company, 
['company-id', 'contact-id'], None, 'FULL_TABLE') - schema, mdata = load_discovered_schema(contacts_by_company) - - result['streams'].append({'stream': CONTACTS_BY_COMPANY, - 'tap_stream_id': CONTACTS_BY_COMPANY, - 'schema': schema, - 'metadata': mdata}) - - return result - -def do_discover(): - LOGGER.info('Loading schemas') - json.dump(discover_schemas(), sys.stdout, indent=4) - -def get_request_timeout(): - # Get `request_timeout` value from config. - config_request_timeout = CONFIG.get('request_timeout') - # if config request_timeout is other than 0, "0" or "" then use request_timeout - if config_request_timeout and float(config_request_timeout): - request_timeout = float(config_request_timeout) - else: - # If value is 0, "0", "" or not passed then it set default to 300 seconds. - request_timeout = REQUEST_TIMEOUT - return request_timeout - -def main_impl(): - args = utils.parse_args( - ["redirect_uri", - "client_id", - "client_secret", - "refresh_token", - "start_date"]) - - CONFIG.update(args.config) - STATE = {} - - if args.state: - STATE.update(args.state) - - if args.discover: - do_discover() - elif args.properties: - do_sync(STATE, args.properties) - else: - LOGGER.info("No properties were selected") - -def main(): - try: - main_impl() - except Exception as exc: - LOGGER.critical(exc) - raise exc - -if __name__ == '__main__': - main() diff --git a/archive/tap_hubspot/schemas/campaigns.json b/archive/tap_hubspot/schemas/campaigns.json deleted file mode 100644 index 29797da..0000000 --- a/archive/tap_hubspot/schemas/campaigns.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "type": "object", - "properties": { - "appId": { - "type": ["null", "integer"] - }, - "appName": { - "type": ["null", "string"] - }, - "contentId": { - "type": ["null", "integer"] - }, - "counters": { - "type": ["null", "object"], - "properties": { - "delievered": { - "type": ["null", "integer"] - }, - "open": { - "type": ["null", "integer"] - }, - "processed": { - "type": ["null", "integer"] - }, - "sent": { - "type": ["null", "integer"] - }, - "deferred": { - "type": ["null", "integer"] - }, - "unsubscribed": { - "type": ["null", "integer"] - }, - "statuschange": { - "type": ["null", "integer"] - }, - "bounce": { - "type": ["null", "integer"] - }, - "mta_dropped": { - "type": ["null", "integer"] - }, - "dropped": { - "type": ["null", "integer"] - }, - "suppressed": { - "type": ["null", "integer"] - }, - "click": { - "type": ["null", "integer"] - }, - "delivered": { - "type": ["null", "integer"] - }, - "forward": { - "type": ["null", "integer"] - }, - "print": { - "type": ["null", "integer"] - }, - "reply": { - "type": ["null", "integer"] - }, - "spamreport": { - "type": ["null", "integer"] - } - } - }, - "id": { - "type": ["null", "integer"] - }, - "name": { - "type": ["null", "string"] - }, - "numIncluded": { - "type": ["null", "integer"] - }, - "numQueued": { - "type": ["null", "integer"] - }, - "subType": { - "type": ["null", "string"] - }, - "subject": { - "type": ["null", "string"] - }, - "type": { - "type": ["null", "string"] - } - } -} diff --git a/archive/tap_hubspot/schemas/companies.json b/archive/tap_hubspot/schemas/companies.json deleted file mode 100644 index 286f249..0000000 --- a/archive/tap_hubspot/schemas/companies.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "type": "object", - "properties": { - "portalId": { - "type": ["null", "integer"] - }, - "companyId": { - "type": ["null", "integer"] - } - } -} diff --git a/archive/tap_hubspot/schemas/contact_lists.json 
b/archive/tap_hubspot/schemas/contact_lists.json deleted file mode 100644 index d3ad2ae..0000000 --- a/archive/tap_hubspot/schemas/contact_lists.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "type": "object", - "properties": { - "parentId": { - "type": ["null", "integer"] - }, - "metaData": { - "type": "object", - "properties": { - "processing": { - "type": ["null", "string"] - }, - "size": { - "type": ["null", "integer"] - }, - "error": { - "type": ["null", "string"] - }, - "lastProcessingStateChangeAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "lastSizeChangeAt": { - "type": ["null", "string"], - "format": "date-time" - } - } - }, - "dynamic": { - "type": ["null", "boolean"] - }, - "name": { - "type": ["null", "string"] - }, - "filters": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "object", - "properties": { - "filterFamily": { - "type": ["null", "string"] - }, - "withinTimeMode": { - "type": ["null", "string"] - }, - "checkPastVersions": { - "type": ["null", "boolean"] - }, - "type": { - "type": ["null", "string"] - }, - "property": { - "type": ["null", "string"] - }, - "value": { - "type": ["null", "string"] - }, - "operator": { - "type": ["null", "string"] - } - } - } - } - }, - "portalId": { - "type": ["null", "integer"] - }, - "createdAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "listId": { - "type": ["null", "integer"] - }, - "updatedAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "internalListId": { - "type": ["null", "integer"] - }, - "readOnly": { - "type": ["null", "boolean"] - }, - "deleteable": { - "type": ["null", "boolean"] - }, - "listType": { - "type": ["null", "string"] - }, - "archived": { - "type": ["null", "boolean"] - } - } -} diff --git a/archive/tap_hubspot/schemas/contacts.json b/archive/tap_hubspot/schemas/contacts.json deleted file mode 100644 index 35e610f..0000000 --- a/archive/tap_hubspot/schemas/contacts.json +++ /dev/null @@ -1,201 +0,0 @@ -{ - "type": "object", - "properties": { - "vid": { - "type": ["null", "integer"] - }, - "versionTimestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "canonical-vid": { - "type": ["null", "integer"] - }, - "merged-vids": { - "type": ["null", "array"], - "items": { - "type": ["null", "integer"] - } - }, - "portal-id": { - "type": ["null", "integer"] - }, - "is-contact": { - "type": ["null", "boolean"] - }, - "profile-token": { - "type": ["null", "string"] - }, - "profile-url": { - "type": ["null", "string"] - }, - "associated-company" : { - "type": ["null", "object"], - "properties" : {} - }, - "identity-profiles": { - "type": ["null", "array"], - "items": { - "type": ["null", "object"], - "properties": { - "deleted-changed-timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "saved-at-timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "vid": { - "type": ["null", "integer"] - }, - "identities": { - "type": ["null", "array"], - "items": { - "type": ["null", "object"], - "properties": { - "timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "type": { - "type": ["null", "string"] - }, - "value": { - "type": ["null", "string"] - } - } - } - } - } - } - }, - "list-memberships": { - "type": ["null", "array"], - "items": { - "type": ["null", "object"], - "properties": { - "internal-list-id": { - "type": ["null", "integer"] - }, - "is-member": { - "type": ["null", "boolean"] - }, - "static-list-id": { - "type": ["null", "integer"] - }, - "timestamp": 
{ - "type": ["null", "string"], - "format": "date-time" - }, - "vid": { - "type": ["null", "integer"] - } - } - } - }, - "form-submissions": { - "type": ["null", "array"], - "items": { - "type": ["null", "object"], - "properties": { - "conversion-id": { - "type": ["null", "string"] - }, - "timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "form-id": { - "type": ["null", "string"] - }, - "portal-id": { - "type": ["null", "integer"] - }, - "page-url": { - "type": ["null", "string"] - }, - "title": { - "type": ["null", "string"] - } - } - } - }, - "merge-audits": { - "type": ["null", "array"], - "items": { - "type": ["null", "object"], - "properties": { - "canonical-vid": { - "type": ["null", "integer"] - }, - "vid-to-merge": { - "type": ["null", "integer"] - }, - "timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "user-id": { - "type": ["null", "integer"] - }, - "num-properties-moved": { - "type": ["null", "integer"] - }, - "merged_from_email": { - "type": ["null", "object"], - "properties": { - "value": { - "type": ["null", "string"] - }, - "source-type": { - "type": ["null", "string"] - }, - "source-id": { - "type": ["null", "string"] - }, - "source-label": { - "type": ["null", "string"] - }, - "source-vids": { - "type": ["null", "array"], - "items": { - "type": ["null", "integer"] - } - }, - "timestamp": { - "type": ["null", "integer"] - }, - "selected": { - "type": ["null", "boolean"] - } - } - }, - "merged_to_email": { - "type": ["null", "object"], - "properties": { - "value": { - "type": ["null", "string"] - }, - "source-type": { - "type": ["null", "string"] - }, - "source-id": { - "type": ["null", "string"] - }, - "source-label": { - "type": ["null", "string"] - }, - "timestamp": { - "type": ["null", "integer"] - }, - "selected": { - "type": ["null", "boolean"] - } - } - } - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/contacts_by_company.json b/archive/tap_hubspot/schemas/contacts_by_company.json deleted file mode 100644 index dafd30b..0000000 --- a/archive/tap_hubspot/schemas/contacts_by_company.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "object", - "properties": { - "contact-id": { - "type": ["integer"] - }, - "company-id": { - "type": ["integer"] - } - }, - "additionalProperties": false -} diff --git a/archive/tap_hubspot/schemas/deal_pipelines.json b/archive/tap_hubspot/schemas/deal_pipelines.json deleted file mode 100644 index e23a644..0000000 --- a/archive/tap_hubspot/schemas/deal_pipelines.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "type": "object", - "properties": { - "pipelineId": { - "type": ["null", "string"] - }, - "stages": { - "type": ["null", "array"], - "items": { - "type": "object", - "properties": { - "stageId": { - "type": ["null", "string"] - }, - "label": { - "type": ["null", "string"] - }, - "probability": { - "type": ["null", "number"] - }, - "active": { - "type": ["null", "boolean"] - }, - "displayOrder": { - "type": ["null", "integer"] - }, - "closedWon": { - "type": ["null", "boolean"] - } - } - } - }, - "label": { - "type": ["null", "string"] - }, - "active": { - "type": ["null", "boolean"] - }, - "displayOrder": { - "type": ["null", "integer"] - }, - "staticDefault": { - "type": ["null", "boolean"] - } - } -} diff --git a/archive/tap_hubspot/schemas/deals.json b/archive/tap_hubspot/schemas/deals.json deleted file mode 100644 index a6cda1d..0000000 --- a/archive/tap_hubspot/schemas/deals.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "type": "object", - "properties": { - "portalId": { - "type": 
["null", "integer"] - }, - "dealId": { - "type": ["null", "integer"] - }, - "isDeleted": { - "type": ["null", "boolean"] - }, - "associations": { - "type": ["null", "object"], - "properties": { - "associatedVids": { - "type": ["null", "array"], - "items": { - "type": ["null", "integer"] - } - }, - "associatedCompanyIds": { - "type": ["null", "array"], - "items": { - "type": ["null", "integer"] - } - }, - "associatedDealIds": { - "type": ["null", "array"], - "items": { - "type": ["null", "integer"] - } - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/email_events.json b/archive/tap_hubspot/schemas/email_events.json deleted file mode 100644 index e74aa07..0000000 --- a/archive/tap_hubspot/schemas/email_events.json +++ /dev/null @@ -1,118 +0,0 @@ -{ - "type": "object", - "properties": { - "appId": { - "type": ["null", "integer"] - }, - "appName": { - "type": ["null", "string"] - }, - "browser": { - "type": ["null", "object"], - "properties": { - "family": { - "type": ["null", "string"] - }, - "name": { - "type": ["null", "string"] - }, - "producer": { - "type": ["null", "string"] - }, - "producerUrl": { - "type": ["null", "string"] - }, - "type": { - "type": ["null", "string"] - }, - "url": { - "type": ["null", "string"] - } - } - }, - "created": { - "type": ["null", "string"], - "format": "date-time" - }, - "deviceType": { - "type": ["null", "string"] - }, - "duration": { - "type": ["null", "integer"] - }, - "emailCampaignId": { - "type": ["null", "integer"] - }, - "emailCampaignGroupId": { - "type": ["null", "integer"] - }, - "filteredEvent": { - "type": ["null", "boolean"] - }, - "from": { - "type": ["null", "string"] - }, - "hmid": { - "type": ["null", "string"] - }, - "id": { - "type": ["null", "string"] - }, - "ipAddress": { - "type": ["null", "string"] - }, - "linkId": { - "type": ["null", "integer"] - }, - "location": { - "type": ["null", "object"], - "properties": { - "city": { - "type": ["null", "string"] - }, - "country": { - "type": ["null", "string"] - }, - "state": { - "type": ["null", "string"] - } - } - }, - "portalId": { - "type": ["null", "integer"] - }, - "recipient": { - "type": ["null", "string"] - }, - "response": { - "type": ["null", "string"] - }, - "sentBy": { - "type": ["null", "object"], - "properties": { - "created": { - "type": ["null", "string"], - "format": "date-time" - }, - "id": { - "type": ["null", "string"] - } - } - }, - "smtpId": { - "type": ["null", "string"] - }, - "subject": { - "type": ["null", "string"] - }, - "type": { - "type": ["null", "string"] - }, - "url": { - "type": ["null", "string"] - }, - "userAgent": { - "type": ["null", "string"] - } - } -} diff --git a/archive/tap_hubspot/schemas/engagements.json b/archive/tap_hubspot/schemas/engagements.json deleted file mode 100644 index 71be960..0000000 --- a/archive/tap_hubspot/schemas/engagements.json +++ /dev/null @@ -1,179 +0,0 @@ -{ - "type": "object", - "properties": { - "engagement_id": { - "type": "integer" - }, - "lastUpdated": { - "type": ["null", "string"], - "format": "date-time" - }, - "engagement": { - "type": "object", - "properties": { - "id": { - "type": "integer" - }, - "portalId": { - "type": "integer" - }, - "active": { - "type": "boolean" - }, - "createdAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "lastUpdated": { - "type": ["null", "string"], - "format": "date-time" - }, - "ownerId": { - "type": "integer" - }, - "type": { - "type": "string" - }, - "timestamp": { - "type": ["null", "string"], - "format": "date-time" - } - } - }, - "associations": 
{ - "type": ["null", "object"], - "properties": { - "contactIds": { - "type": ["null", "array"], - "items": { - "type": "integer" - } - }, - "companyIds": { - "type": ["null", "array"], - "items": { - "type": "integer" - } - }, - "dealIds": { - "type": ["null", "array"], - "items": { - "type": "integer" - } - } - } - }, - "attachments": { - "type": ["null", "array"], - "items": { - "type": "object", - "properties": { - "id": { - "type": "integer" - } - } - } - }, - "metadata": { - "type": ["null", "object"], - "properties": { - "body": { - "type": ["null", "string"] - }, - "from": { - "type": ["null", "object"], - "properties": { - "email": { - "type": "string" - }, - "firstName": { - "type": "string" - }, - "lastName": { - "type": "string" - } - } - }, - "to": { - "type": ["null", "array"], - "items": { - "type": "object", - "properties": { - "email": { - "type": "string" - } - } - } - }, - "cc": { - "type": ["null", "array"], - "items": { - "type": "object", - "properties": { - "email": { - "type": "string" - } - } - } - }, - "bcc": { - "type": ["null", "array"], - "items": { - "type": "object", - "properties": { - "email": { - "type": "string" - } - } - } - }, - "subject": { - "type": ["null", "string"] - }, - "html": { - "type": ["null", "string"] - }, - "text": { - "type": ["null", "string"] - }, - "status": { - "type": ["null", "string"] - }, - "forObjectType": { - "type": ["null", "string"] - }, - "startTime": { - "type": ["null", "integer"] - }, - "endTime": { - "type": ["null", "integer"] - }, - "title": { - "type": ["null", "string"] - }, - "toNumber": { - "type": ["null", "string"] - }, - "fromNumber": { - "type": ["null", "string"] - }, - "externalId": { - "type": ["null", "string"] - }, - "durationMilliseconds": { - "type": ["null", "integer"] - }, - "externalAccountId": { - "type": ["null", "string"] - }, - "recordingUrl": { - "type": ["null", "string"], - "format": "uri" - }, - "disposition": { - "type": ["null", "string"] - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/forms.json b/archive/tap_hubspot/schemas/forms.json deleted file mode 100644 index 61fcaa9..0000000 --- a/archive/tap_hubspot/schemas/forms.json +++ /dev/null @@ -1,229 +0,0 @@ -{ - "type": "object", - "properties": { - "deletedAt": { - "type": ["null", "integer"] - }, - "portalId": { - "type": ["null", "integer"] - }, - "guid": { - "type": ["null", "string"] - }, - "name": { - "type": ["null", "string"] - }, - "action": { - "type": ["null", "string"] - }, - "method": { - "type": ["null", "string"] - }, - "cssClass": { - "type": ["null", "string"] - }, - "redirect": { - "type": ["null", "string"] - }, - "submitText": { - "type": ["null", "string"] - }, - "followUpId": { - "type": ["null", "string"] - }, - "notifyRecipients": { - "type": ["null", "string"] - }, - "leadNurturingCampaignId": { - "type": ["null", "string"] - }, - "formFieldGroups": { - "type": "array", - "items": { - "type": "object", - "properties": { - "fields": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": ["null", "string"] - }, - "label": { - "type": ["null", "string"] - }, - "type": { - "type": ["null", "string"] - }, - "fieldType": { - "type": ["null", "string"] - }, - "description": { - "type": ["null", "string"] - }, - "groupName": { - "type": ["null", "string"] - }, - "displayOrder": { - "type": ["null", "integer"] - }, - "required": { - "type": ["null", "boolean"] - }, - "validation": { - "type": "object", - "properties": { - "name": { - "type": ["null", "string"] - }, - 
"message": { - "type": ["null", "string"] - }, - "data": { - "type": ["null", "string"] - }, - "useDefaultBlockList": { - "type": ["null", "boolean"] - }, - "blockedEmailAddresses": { - "type": "array", - "items": { - "type": ["null", "string"] - } - } - } - }, - "enabled": { - "type": ["null", "boolean"] - }, - "hidden": { - "type": ["null", "boolean"] - }, - "defaultValue": { - "type": ["null", "string"] - }, - "isSmartField": { - "type": ["null", "boolean"] - }, - "unselectedLabel": { - "type": ["null", "string"] - }, - "placeholder": { - "type": ["null", "string"] - }, - "labelHidden": { - "type": ["null", "boolean"] - }, - "options": { - "type": "array", - "items": { - "type": "object", - "properties": { - "description": { - "type": ["null", "string"] - }, - "displayOrder": { - "type": ["null", "integer"] - }, - "doubleData": { - "type": ["null", "number"] - }, - "hidden" : { - "type": ["null", "boolean"] - }, - "label": { - "type": ["null", "string"] - }, - "readOnly": { - "type": ["null", "boolean"] - }, - "value": { - "type": ["null", "string"] - } - } - } - }, - "selectedOptions": { - "type": "array", - "items": { - "type" : ["null", "string"] - } - } - } - } - }, - "default": { - "type": ["null", "boolean"] - }, - "isSmartGroup": { - "type": ["null", "boolean"] - }, - "richText": { - "type": "object", - "properties": { - "content": { - "type": ["null", "string"] - } - } - } - } - } - }, - "createdAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "updatedAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "performableHtml": { - "type": ["null", "string"] - }, - "migratedFrom": { - "type": ["null", "string"] - }, - "ignoreCurrentValues": { - "type": ["null", "boolean"] - }, - "deletable": { - "type": ["null", "boolean"] - }, - "inlineMessage": { - "type": ["null", "string"] - }, - "tmsId": { - "type": ["null", "string"] - }, - "captchaEnabled": { - "type": ["null", "boolean"] - }, - "campaignGuid": { - "type": ["null", "string"] - }, - "cloneable": { - "type": ["null", "boolean"] - }, - "editable": { - "type": ["null", "boolean"] - }, - "formType": { - "type": ["null", "string"] - }, - "metaData": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": ["null", "string"] - }, - "value": { - "type": ["null", "string"] - } - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/owners.json b/archive/tap_hubspot/schemas/owners.json deleted file mode 100644 index 2e3d61d..0000000 --- a/archive/tap_hubspot/schemas/owners.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "type": "object", - "properties": { - "portalId": { - "type": ["null", "integer"] - }, - "ownerId": { - "type": ["null", "integer"] - }, - "type": { - "type": ["null", "string"] - }, - "firstName": { - "type": ["null", "string"] - }, - "lastName": { - "type": ["null", "string"] - }, - "email": { - "type": ["null", "string"] - }, - "createdAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "signature": { - "type": ["null", "string"] - }, - "updatedAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "hasContactsAccess" : { - "type": ["null", "boolean"] - }, - "isActive": { - "type": ["null", "boolean"] - }, - "activeUserId" : { - "type": ["null", "integer"] - }, - "userIdIncludingInactive" : { - "type": ["null", "integer"] - }, - "remoteList": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": ["null", "integer"] - }, - "portalId": { - "type": ["null", "integer"] - }, - "ownerId": { - 
"type": ["null", "integer"] - }, - "remoteId": { - "type": ["null", "string"] - }, - "remoteType": { - "type": ["null", "string"] - }, - "active": { - "type": ["null", "boolean"] - } - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/subscription_changes.json b/archive/tap_hubspot/schemas/subscription_changes.json deleted file mode 100644 index 1db687d..0000000 --- a/archive/tap_hubspot/schemas/subscription_changes.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "type": "object", - "properties": { - "timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "portalId": { - "type": ["null", "integer"] - }, - "recipient": { - "type": ["null", "string"] - }, - "changes": { - "type": ["null", "array"], - "items": { - "type": ["null", "object"], - "properties": { - "change": { - "type": ["null", "string"] - }, - "timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "source": { - "type": ["null", "string"] - }, - "portalId": { - "type": ["null", "integer"] - }, - "subscriptionId": { - "type": ["null", "integer"] - }, - "changeType": { - "type": ["null", "string"] - }, - "causedByEvent": { - "type": ["null", "object"], - "properties": { - "id": { - "type": ["null", "string"] - }, - "created": { - "type": ["null", "string"], - "format": "date-time" - } - } - } - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/tickets.json b/archive/tap_hubspot/schemas/tickets.json deleted file mode 100644 index 264c567..0000000 --- a/archive/tap_hubspot/schemas/tickets.json +++ /dev/null @@ -1,138 +0,0 @@ -{ - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "createdAt": { - "type": [ - "null", - "string" - ], - "format": "date-time" - }, - "updatedAt": { - "type": [ - "null", - "string" - ], - "format": "date-time" - }, - "archived": { - "type": [ - "null", - "boolean" - ] - }, - "associations": { - "type": [ - "null", - "object" - ], - "properties": { - "companies": { - "type": [ - "null", - "object" - ], - "properties": { - "results": { - "type": [ - "null", - "array" - ], - "items": { - "type": [ - "null", - "object" - ], - "properties": { - "id": { - "type": [ - "null", - "string" - ] - }, - "type": { - "type": [ - "null", - "string" - ] - } - } - } - } - } - }, - "deals": { - "type": [ - "null", - "object" - ], - "properties": { - "results": { - "type": [ - "null", - "array" - ], - "items": { - "type": [ - "null", - "object" - ], - "properties": { - "id": { - "type": [ - "null", - "string" - ] - }, - "type": { - "type": [ - "null", - "string" - ] - } - } - } - } - } - }, - "contacts": { - "type": [ - "null", - "object" - ], - "properties": { - "results": { - "type": [ - "null", - "array" - ], - "items": { - "type": [ - "null", - "object" - ], - "properties": { - "id": { - "type": [ - "null", - "string" - ] - }, - "type": { - "type": [ - "null", - "string" - ] - } - } - } - } - } - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/versions.json b/archive/tap_hubspot/schemas/versions.json deleted file mode 100644 index f725655..0000000 --- a/archive/tap_hubspot/schemas/versions.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "type": "array", - "items": { - "type": ["null", "object"], - "properties": { - "name": { - "type": ["null", "string"] - }, - "value": { - "type": ["null", "string"] - }, - "timestamp": { - "type": ["null", "string"], - "format": "date-time" - }, - "source": { - "type": ["null", "string"] - }, - "sourceId": { - "type": ["null", "string"] - }, - "sourceVid": { - "type": ["null", "array"], - "items": { - "type": 
["null", "string"] - } - } - } - } -} diff --git a/archive/tap_hubspot/schemas/workflows.json b/archive/tap_hubspot/schemas/workflows.json deleted file mode 100644 index a72491a..0000000 --- a/archive/tap_hubspot/schemas/workflows.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "type": "object", - "properties": { - "name": { - "type": ["null", "string"] - }, - "id": { - "type": ["null", "integer"] - }, - "type": { - "type": ["null", "string"] - }, - "enabled": { - "type": ["null", "boolean"] - }, - "insertedAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "updatedAt": { - "type": ["null", "string"], - "format": "date-time" - }, - "personaTagIds": { - "type": "array", - "items": { - "type": "integer" - } - }, - "contactListIds": { - "type": "object", - "properties": { - "enrolled": { - "type": ["null", "integer"] - }, - "active": { - "type": ["null", "integer"] - }, - "steps": { - "type": ["null", "array"], - "items": { - "type": ["null", "string"] - } - } - } - } - } -} diff --git a/archive/tap_hubspot/tests/__init__.py b/archive/tap_hubspot/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/archive/tap_hubspot/tests/test_bookmarks.py b/archive/tap_hubspot/tests/test_bookmarks.py deleted file mode 100644 index cf01c4f..0000000 --- a/archive/tap_hubspot/tests/test_bookmarks.py +++ /dev/null @@ -1,62 +0,0 @@ -import unittest -import singer.messages -import tap_hubspot -from tap_hubspot.tests import utils - -LOGGER = singer.get_logger() - -class Bookmarks(unittest.TestCase): - def setUp(self): - utils.verify_environment_vars() - utils.seed_tap_hubspot_config() - utils.write_to_singer() - - #NB> test account must have > 2 contacts for this to work - def sync_contacts(self): - STATE = utils.get_clear_state() - catalog = {'stream_alias': 'hubspot_contacts'} - - tap_hubspot.default_contact_params['count'] = 1 - - STATE = tap_hubspot.sync_contacts(STATE, catalog) - #offset has been cleared - self.assertEqual(utils.caught_state['bookmarks']['contacts']['offset'], {}) - - #some bookmark has been recorded in the state - self.assertNotEqual(utils.caught_state['bookmarks']['contacts']['lastmodifieddate'], None) - - #should sync some contacts - # LOGGER.info('A caught record: {}'.format(utils.caught_records['contacts'][0])) - self.assertGreater(len(utils.caught_records['contacts']), 1) - self.assertEqual(set(utils.caught_records.keys()), {'contacts'}) - self.assertEqual(utils.caught_pks, {'contacts': ['vid']}) - - utils.caught_records = [] - STATE = tap_hubspot.sync_contacts(STATE, catalog) - - #no new records thanks to bookmark - self.assertEqual(len(utils.caught_records), 0) - - def sync_companies(self): - STATE = utils.get_clear_state() - - catalog = {'stream_alias': 'hubspot_companies'} - STATE = tap_hubspot.sync_companies(STATE, catalog) - - #offset has been cleared - self.assertEqual(utils.caught_state['bookmarks']['companies']['offset'], {}) - - #some bookmark has been recorded in the state - self.assertNotEqual(utils.caught_state['bookmarks']['companies']['hs_lastmodifieddate'], None) - - #should sync some contacts && some hubspot_contacts_by_company - self.assertGreater(len(utils.caught_records), 0) - self.assertEqual(set(utils.caught_records.keys()), {'companies', 'hubspot_contacts_by_company'}) - - self.assertEqual(utils.caught_pks, {'companies': ['companyId'], 'hubspot_contacts_by_company': ['company-id', 'contact-id']}) - - utils.caught_records = [] - STATE = tap_hubspot.sync_companies(STATE, catalog) - - #no new records thanks to bookmark - 
self.assertEqual(len(utils.caught_records), 0) diff --git a/archive/tap_hubspot/tests/test_deals.py b/archive/tap_hubspot/tests/test_deals.py deleted file mode 100644 index bc63d1d..0000000 --- a/archive/tap_hubspot/tests/test_deals.py +++ /dev/null @@ -1,34 +0,0 @@ -from tap_hubspot import sync_deals -from unittest.mock import patch, ANY - - -@patch('builtins.min') -@patch('tap_hubspot.Context.get_catalog_from_id', return_value={"metadata": ""}) -@patch('singer.metadata.to_map', return_value={}) -@patch('singer.utils.strptime_with_tz') -@patch('singer.utils.strftime') -@patch('tap_hubspot.load_schema') -@patch('tap_hubspot.gen_request', return_value=[]) -def test_associations_are_not_validated(mocked_gen_request, mocked_catalog_from_id, mocked_metadata_map, mocked_utils_strptime, mocked_utils_strftime, mocked_load_schema, mocked_min): - # pylint: disable=unused-argument - sync_deals({}, mocked_catalog_from_id) - - expected_param = {'includeAssociations': False, 'properties': [], 'limit': 100} - - mocked_gen_request.assert_called_once_with(ANY, ANY, ANY, expected_param, ANY, ANY, ANY, ANY, v3_fields=None) - - -@patch('builtins.min') -@patch('tap_hubspot.Context.get_catalog_from_id', return_value={"metadata": ""}) -@patch('singer.metadata.to_map', return_value={"associations": {"selected": True}}) -@patch('singer.utils.strptime_with_tz') -@patch('singer.utils.strftime') -@patch('tap_hubspot.load_schema') -@patch('tap_hubspot.gen_request', return_value=[]) -def test_associations_are_validated(mocked_gen_request, mocked_catalog_from_id, mocked_metadata_map, mocked_utils_strptime, mocked_utils_strftime, mocked_load_schema, mocked_min): - # pylint: disable=unused-argument - sync_deals({}, mocked_catalog_from_id) - - expected_param = {'includeAssociations': True, 'properties': [], 'limit': 100} - - mocked_gen_request.assert_called_once_with(ANY, ANY, ANY, expected_param, ANY, ANY, ANY, ANY, v3_fields=None) diff --git a/archive/tap_hubspot/tests/test_get_streams_to_sync.py b/archive/tap_hubspot/tests/test_get_streams_to_sync.py deleted file mode 100644 index 394f190..0000000 --- a/archive/tap_hubspot/tests/test_get_streams_to_sync.py +++ /dev/null @@ -1,44 +0,0 @@ -import unittest -from tap_hubspot import get_streams_to_sync, parse_source_from_url, Stream - - -class TestGetStreamsToSync(unittest.TestCase): - - def setUp(self): - self.streams = [ - Stream('a', 'a', [], None, None), - Stream('b', 'b', [], None, None), - Stream('c', 'c', [], None, None), - ] - - def test_get_streams_to_sync_with_no_this_stream(self): - state = {'this_stream': None} - self.assertEqual(self.streams, get_streams_to_sync(self.streams, state)) - - def test_get_streams_to_sync_with_first_stream(self): - state = {'currently_syncing': 'a'} - - result = get_streams_to_sync(self.streams, state) - - parsed_result = [s.tap_stream_id for s in result] - self.assertEqual(parsed_result, ['a', 'b', 'c']) - - def test_get_streams_to_sync_with_middle_stream(self): - state = {'currently_syncing': 'b'} - - result = get_streams_to_sync(self.streams, state) - - parsed_result = [s.tap_stream_id for s in result] - self.assertEqual(parsed_result, ['b', 'c', 'a']) - - def test_get_streams_to_sync_with_last_stream(self): - state = {'currently_syncing': 'c'} - - result = get_streams_to_sync(self.streams, state) - - parsed_result = [s.tap_stream_id for s in result] - self.assertEqual(parsed_result, ['c', 'a', 'b']) - - def test_parse_source_from_url_succeeds(self): - url = "https://api.hubapi.com/companies/v2/companies/recent/modified" - 
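# parse_source_from_url is expected to pull the stream name ('companies') out of the request URL's path -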
self.assertEqual('companies', parse_source_from_url(url)) diff --git a/archive/tap_hubspot/tests/test_offsets.py b/archive/tap_hubspot/tests/test_offsets.py deleted file mode 100644 index 5b8c588..0000000 --- a/archive/tap_hubspot/tests/test_offsets.py +++ /dev/null @@ -1,57 +0,0 @@ -import unittest -import singer -import tap_hubspot -import singer.bookmarks -from tap_hubspot.tests import utils - -LOGGER = singer.get_logger() - -def set_offset_with_exception(state, tap_stream_id, offset_key, offset_value): - LOGGER.info("set_offset_with_exception: %s", utils.caught_state) - utils.caught_state = singer.bookmarks.set_offset(state, tap_stream_id, offset_key, offset_value) - raise Exception("simulated") - -class Offsets(unittest.TestCase): - def setUp(self): - utils.verify_environment_vars() - utils.seed_tap_hubspot_config() - utils.write_to_singer() - singer.set_offset = set_offset_with_exception - - #NB> test accounts must have more than 1 company for this to work - def sync_companies(self): - simulated_exception = None - STATE = utils.get_clear_state() - catalog = {'stream_alias': 'hubspot_companies'} - - #change count = 1 - tap_hubspot.default_company_params['limit'] = 1 - - try: - STATE = tap_hubspot.sync_companies(STATE, catalog) - except Exception as ex: - simulated_exception = ex - # logging.exception('strange') - - self.assertIsNot(simulated_exception, None) - - - self.assertEqual(set(utils.caught_records.keys()), {'companies', 'hubspot_contacts_by_company'}) - - #should only emit 1 company record because of the limit - self.assertEqual(len(utils.caught_records['companies']), 1) - self.assertGreater(len(utils.caught_records['hubspot_contacts_by_company']), 0) - - #offset should be set in state - LOGGER.info("utils.caught_state: %s", utils.caught_state) - self.assertNotEqual(utils.caught_state['bookmarks']['companies']['offset'], {}) - - #no bookmark though - self.assertEqual(utils.caught_state['bookmarks']['companies']['hs_lastmodifieddate'], None) - - #change count back to 250 - tap_hubspot.default_company_params['limit'] = 250 - - #call do_sync and verify: - # 1)sync_companies is called first - # 2)previously retrieved records are not retrieved again diff --git a/archive/tap_hubspot/tests/unittests/test_get_start.py b/archive/tap_hubspot/tests/unittests/test_get_start.py deleted file mode 100644 index 42fed6e..0000000 --- a/archive/tap_hubspot/tests/unittests/test_get_start.py +++ /dev/null @@ -1,94 +0,0 @@ -import unittest -import tap_hubspot -from tap_hubspot import get_start -from tap_hubspot import singer - -def get_state(key,value): - """ - Returns a mock state - """ - return { - "bookmarks": { - "stream_id_1": { - "offset": {}, - key: value - } - } - } - -class TestGetStart(unittest.TestCase): - """ - Verify the return value of the `get_start` function. - """ - def test_get_start_without_state(self): - """ - This test verifies that the `get_start` function returns start_date from CONFIG - if an empty state is passed. - """ - mock_state = {} - expected_value = tap_hubspot.CONFIG["start_date"] - returned_value = get_start(mock_state, "stream_id_1", "current_bookmark", "old_bookmark") - - # Verify that returned value is start_date - self.assertEqual(returned_value, expected_value) - - def test_get_start_with_old_bookmark(self): - """ - This test verifies that the `get_start` function returns old_bookmark from the state - if current_bookmark is not available in the state.
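For example (hypothetical values): with mock_state = get_state("old_bookmark", "2021-01-01T00:00:00Z"), get_start should fall back to "2021-01-01T00:00:00Z", since no "current_bookmark" key exists in that state.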
- """ - mock_state = get_state("old_bookmark", "OLD_BOOKMARK_VALUE") - expected_value = "OLD_BOOKMARK_VALUE" - - returned_value = get_start(mock_state, "stream_id_1", "current_bookmark", "old_bookmark") - - # Verify that returned value is old_bookmark_value - self.assertEqual(returned_value, expected_value) - - def test_get_start_with_current_bookmark_and_no_old_bookmark(self): - """ - This test verifies that the `get_start` function returns current_bookmark from the state - if current_bookmark is available in the state and old_bookmark is not given. - """ - mock_state = get_state("current_bookmark", "CURR_BOOKMARK_VALUE") - expected_value = "CURR_BOOKMARK_VALUE" - - returned_value = get_start(mock_state, "stream_id_1", "current_bookmark") - - # Verify that returned value is current bookmark - self.assertEqual(returned_value, expected_value) - - def test_get_start_with_empty_start__no_old_bookmark(self): - """ - This test verifies that the `get_start` function returns start_date from CONFIG - if an empty state is passed and old_bookamrk is not given. - """ - mock_state = {} - expected_value = tap_hubspot.CONFIG["start_date"] - - returned_value = get_start(mock_state, "stream_id_1", "current_bookmark") - - # Verify that returned value is start_date - self.assertEqual(returned_value, expected_value) - - def test_get_start_with_both_bookmark(self): - """ - This test verifies that the `get_start` function returns current_bookmark from the state - if both old and current bookmark is available in the state. - """ - - mock_state = { - "bookmarks": { - "stream_id_1": { - "offset": {}, - "old_bookmark": "OLD_BOOKMARK_VALUE", - "current_bookmark": "CURR_BOOKMARK_VALUE" - } - } - } - expected_value = "CURR_BOOKMARK_VALUE" - - returned_value = get_start(mock_state, "stream_id_1", "current_bookmark", "old_bookmark") - - # Verify that returned value is current bookmark - self.assertEqual(returned_value, expected_value) diff --git a/archive/tap_hubspot/tests/unittests/test_request_timeout.py b/archive/tap_hubspot/tests/unittests/test_request_timeout.py deleted file mode 100644 index 33d5456..0000000 --- a/archive/tap_hubspot/tests/unittests/test_request_timeout.py +++ /dev/null @@ -1,121 +0,0 @@ -import unittest -import requests -from unittest import mock -import tap_hubspot -class TestRequestTimeoutValue(unittest.TestCase): - - def test_integer_request_timeout_in_config(self): - """ - Verify that if request_timeout is provided in config(integer value) then it should be use - """ - tap_hubspot.CONFIG.update({"request_timeout": 100}) # integer timeout in config - - request_timeout = tap_hubspot.get_request_timeout() - - self.assertEqual(request_timeout, 100.0) # Verify timeout value - - def test_float_request_timeout_in_config(self): - """ - Verify that if request_timeout is provided in config(float value) then it should be use - """ - tap_hubspot.CONFIG.update({"request_timeout": 100.5}) # float timeout in config - - request_timeout = tap_hubspot.get_request_timeout() - - self.assertEqual(request_timeout, 100.5) # Verify timeout value - - def test_string_request_timeout_in_config(self): - """ - Verify that if request_timeout is provided in config(string value) then it should be use - """ - tap_hubspot.CONFIG.update({"request_timeout": "100"}) # string format timeout in config - - request_timeout = tap_hubspot.get_request_timeout() - - self.assertEqual(request_timeout, 100.0) # Verify timeout value - - def test_empty_string_request_timeout_in_config(self): - """ - Verify that if request_timeout is 
provided in the config as an empty string then the default value is used - """ - tap_hubspot.CONFIG.update({"request_timeout": ""}) # empty string in config - - request_timeout = tap_hubspot.get_request_timeout() - - self.assertEqual(request_timeout, 300) # Verify timeout value - - def test_zero_request_timeout_in_config(self): - """ - Verify that if request_timeout is provided in the config with a zero value then the default value is used - """ - tap_hubspot.CONFIG.update({"request_timeout": 0}) # zero value in config - - request_timeout = tap_hubspot.get_request_timeout() - - self.assertEqual(request_timeout, 300) # Verify timeout value - - def test_zero_string_request_timeout_in_config(self): - """ - Verify that if request_timeout is provided in the config with zero in string format then the default value is used - """ - tap_hubspot.CONFIG.update({"request_timeout": '0'}) # zero value in config - - request_timeout = tap_hubspot.get_request_timeout() - - self.assertEqual(request_timeout, 300) # Verify timeout value - - def test_no_request_timeout_in_config(self): - """ - Verify that if request_timeout is not provided in the config then the default value is used - """ - tap_hubspot.CONFIG = {} - request_timeout = tap_hubspot.get_request_timeout() - - self.assertEqual(request_timeout, 300) # Verify timeout value - - -@mock.patch("time.sleep") -class TestRequestTimeoutBackoff(unittest.TestCase): - - @mock.patch('requests.Session.send', side_effect = requests.exceptions.Timeout) - @mock.patch("requests.Request.prepare") - @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {})) - def test_request_timeout_backoff(self, mocked_get, mocked_prepare, mocked_send, mocked_sleep): - """ - Verify that the request function backs off only 5 times on a Timeout exception. - """ - try: - tap_hubspot.request('dummy_url', {}) - except Exception: - pass - - # Verify that Session.send is called 5 times - self.assertEqual(mocked_send.call_count, 5) - - @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {})) - @mock.patch('requests.post', side_effect = requests.exceptions.Timeout) - def test_request_timeout_backoff_for_post_search_endpoint(self, mocked_post, mocked_get, mocked_sleep): - """ - Verify that the post_search_endpoint function backs off only 5 times on a Timeout exception. - """ - try: - tap_hubspot.post_search_endpoint('dummy_url', {}) - except Exception: - pass - - # Verify that requests.post is called 5 times - self.assertEqual(mocked_post.call_count, 5) - - @mock.patch('requests.post', side_effect = requests.exceptions.Timeout) - def test_request_timeout_backoff_for_acquire_access_token_from_refresh_token(self, mocked_post, mocked_sleep): - """ - Verify that the request function backs off only 5 times, instead of 25, on a Timeout exception thrown from the `acquire_access_token_from_refresh_token` method. - Here, get_params_and_headers is called from the request method, and acquire_access_token_from_refresh_token is called from get_params_and_headers.
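A minimal sketch of the pattern these tests exercise (illustrative only; it assumes the tap wraps its request helpers with the backoff library, as the test client elsewhere in this archive does):

import backoff
import requests

@backoff.on_exception(backoff.constant,
                      requests.exceptions.Timeout,
                      max_tries=5,   # total attempts, matching the call counts asserted in these tests
                      interval=10)
def request_with_retry(url, params=None):
    # Each Timeout raised here triggers a retry, until the fifth attempt fails for good.
    return requests.get(url, params=params, timeout=300)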
- """ - try: - tap_hubspot.post_search_endpoint('dummy_url', {}) - except Exception: - pass - - # Verify that requests.post is called 5 times - self.assertEqual(mocked_post.call_count, 5) diff --git a/archive/tap_hubspot/tests/unittests/test_tickets.py b/archive/tap_hubspot/tests/unittests/test_tickets.py deleted file mode 100644 index 38e3490..0000000 --- a/archive/tap_hubspot/tests/unittests/test_tickets.py +++ /dev/null @@ -1,147 +0,0 @@ -import unittest -from unittest.mock import patch - -from tap_hubspot import sync_tickets - -mock_response_data = { - "results": [{ - "updatedAt": "2022-08-18T12:57:17.587Z", - "createdAt": "2019-08-06T02:43:01.930Z", - "name": "hs_file_upload", - "label": "File upload", - "type": "string", - "fieldType": "file", - "description": "Files attached to a support form by a contact.", - "groupName": "ticketinformation", - "options": [], - "displayOrder": -1, - "calculated": False, - "externalOptions": False, - "hasUniqueValue": False, - "hidden": False, - "hubspotDefined": True, - "modificationMetadata": { - "archivable": True, - "readOnlyDefinition": True, - "readOnlyValue": False - }, - "formField": True - }] -} - - -class MockResponse: - - def __init__(self, json_data): - self.json_data = json_data - - def json(self): - return self.json_data - - -class MockContext: - def get_catalog_from_id(self, stream_name): - return { - "stream": "tickets", - "tap_stream_id": "tickets", - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "updatedAt": { - "type": [ - "null", - "string" - ], - "format": "date-time" - }, - "properties": { - "type": "object", - "properties": { - "hs_all_team_ids": { - "type": [ - "null", - "string" - ] - } - } - }, - "property_hs_all_team_ids": { - "type": [ - "null", - "string" - ] - } - } - }, - "metadata": [{ - "breadcrumb": [], - "metadata": { - "table-key-properties": ["id"], - "forced-replication-method": "INCREMENTAL", - "valid-replication-keys": [ - "updatedAt" - ], - "selected": True - } - }, - { - "breadcrumb": ["properties", "id"], - "metadata": { - "inclusion": "automatic" - } - }, - - { - "breadcrumb": ["properties", "updatedAt"], - "metadata": { - "inclusion": "automatic" - } - }, - { - "breadcrumb": ["properties", "properties"], - "metadata": { - "inclusion": "available" - } - }, - - { - "breadcrumb": ["properties", "property_hs_all_team_ids"], - "metadata": { - "inclusion": "available", - "selected": True - } - } - ] - } - - -class TestTickets(unittest.TestCase): - - @patch('tap_hubspot.request', return_value=MockResponse(mock_response_data)) - @patch('tap_hubspot.get_start', return_value='2023-01-01T00:00:00Z') - @patch('tap_hubspot.gen_request_tickets') - def test_ticket_params_are_validated(self, mocked_gen_request, mocked_get_start, - mock_request_response): - """ - # Validating the parameters passed while making the API request to list the tickets - """ - mock_context = MockContext() - expected_param = {'limit': 100, - 'associations': 'contact,company,deals', - 'properties': 'hs_all_team_ids', - 'archived': False - } - expected_return_value = {'currently_syncing': 'tickets', 'bookmarks': { - 'tickets': {'updatedAt': '2023-01-01T00:00:00.000000Z'}}} - - return_value = sync_tickets({'currently_syncing': 'tickets'}, mock_context) - self.assertEqual( - expected_return_value, - return_value - ) - mocked_gen_request.assert_called_once_with('tickets', - 'https://api.hubapi.com/crm/v4/objects/tickets', - expected_param, 'results', 'paging') diff --git a/archive/tap_hubspot/tests/utils.py 
b/archive/tap_hubspot/tests/utils.py deleted file mode 100644 index 6c3e481..0000000 --- a/archive/tap_hubspot/tests/utils.py +++ /dev/null @@ -1,80 +0,0 @@ -import singer -import singer.bookmarks -import os -import tap_hubspot - -LOGGER = singer.get_logger() - -caught_records = {} -caught_bookmarks = [] -caught_state = {} -caught_schema = {} -caught_pks = {} - - -def verify_environment_vars(): - missing_envs = [x for x in [os.getenv('TAP_HUBSPOT_REDIRECT_URI'), - os.getenv('TAP_HUBSPOT_CLIENT_ID'), - os.getenv('TAP_HUBSPOT_CLIENT_SECRET'), - os.getenv('TAP_HUBSPOT_REFRESH_TOKEN')] if x is None] - if len(missing_envs) != 0: - #pylint: disable=line-too-long - raise Exception("set TAP_HUBSPOT_REDIRECT_URI, TAP_HUBSPOT_CLIENT_ID, TAP_HUBSPOT_CLIENT_SECRET, TAP_HUBSPOT_REFRESH_TOKEN") - -def seed_tap_hubspot_config(): - tap_hubspot.CONFIG = { - "access_token": None, - "token_expires": None, - - "redirect_uri": os.environ['TAP_HUBSPOT_REDIRECT_URI'], - "client_id": os.environ['TAP_HUBSPOT_CLIENT_ID'], - "client_secret": os.environ['TAP_HUBSPOT_CLIENT_SECRET'], - "refresh_token": os.environ['TAP_HUBSPOT_REFRESH_TOKEN'], - "start_date": "2001-01-01T00:00:00Z" - } - -def get_clear_state(): - return { - "bookmarks": { - "contacts": { - "offset": {}, - "lastmodifieddate": None - }, - "companies": { - "offset": {}, - "hs_lastmodifieddate": None - } - - }, - "currently_syncing": None - } - - -#pylint: disable=line-too-long -def our_write_bookmark(state, table_name, bookmark_key, bookmark_value): - caught_bookmarks.append([bookmark_key, bookmark_value]) - state = singer.bookmarks.write_bookmark(state, table_name, bookmark_key, bookmark_value) - return state - -def our_write_schema(table_name, schema, pks): - caught_pks[table_name] = pks - caught_schema[table_name] = schema - -def our_write_state(state): - # pylint: disable=global-statement - LOGGER.info("our_write_state: %s", state) - global caught_state - caught_state = state - return state - -def our_write_record(table_name, record): - if caught_records.get(table_name) is None: - caught_records[table_name] = [] - - caught_records[table_name].append(record) - -def write_to_singer(): - singer.write_bookmark = our_write_bookmark - singer.write_state = our_write_state - singer.write_record = our_write_record - singer.write_schema = our_write_schema diff --git a/archive/tests/base.py b/archive/tests/base.py deleted file mode 100644 index 769eac8..0000000 --- a/archive/tests/base.py +++ /dev/null @@ -1,390 +0,0 @@ -import os -import unittest -from datetime import datetime as dt -from datetime import timedelta - -import tap_tester.menagerie as menagerie -import tap_tester.connections as connections -import tap_tester.runner as runner -from tap_tester.base_case import BaseCase -from tap_tester import LOGGER - - -class HubspotBaseTest(BaseCase): - - REPLICATION_KEYS = "valid-replication-keys" - PRIMARY_KEYS = "table-key-properties" - FOREIGN_KEYS = "table-foreign-key-properties" - REPLICATION_METHOD = "forced-replication-method" - INCREMENTAL = "INCREMENTAL" - FULL = "FULL_TABLE" - - START_DATE_FORMAT = "%Y-%m-%dT00:00:00Z" # %H:%M:%SZ - BASIC_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" - - EXPECTED_PAGE_SIZE = "expected-page-size" - OBEYS_START_DATE = "obey-start-date" - PARENT_STREAM = "parent-stream" - - ####################################### - # Tap Configurable Metadata Methods # - ####################################### - - def setUp(self): - missing_envs = [x for x in [ - 'TAP_HUBSPOT_REDIRECT_URI', - 'TAP_HUBSPOT_CLIENT_ID', - 'TAP_HUBSPOT_CLIENT_SECRET', 
- 'TAP_HUBSPOT_REFRESH_TOKEN' - ] if os.getenv(x) is None] - if missing_envs: - raise Exception("Missing environment variables: {}".format(missing_envs)) - - @staticmethod - def get_type(): - return "platform.hubspot" - - @staticmethod - def tap_name(): - return "tap-hubspot" - - def get_properties(self): - start_date = dt.today() - timedelta(days=1) - start_date_with_fmt = dt.strftime(start_date, self.START_DATE_FORMAT) - - return {'start_date' : start_date_with_fmt} - - def get_credentials(self): - return {'refresh_token': os.getenv('TAP_HUBSPOT_REFRESH_TOKEN'), - 'client_secret': os.getenv('TAP_HUBSPOT_CLIENT_SECRET'), - 'redirect_uri': os.getenv('TAP_HUBSPOT_REDIRECT_URI'), - 'client_id': os.getenv('TAP_HUBSPOT_CLIENT_ID')} - - def expected_check_streams(self): - return set(self.expected_metadata().keys()) - - def expected_metadata(self): # DOCS_BUG https://stitchdata.atlassian.net/browse/DOC-1523) - """The expected streams and metadata about the streams""" - return { - "campaigns": { - self.PRIMARY_KEYS: {"id"}, - self.REPLICATION_METHOD: self.FULL, - self.OBEYS_START_DATE: False - }, - "companies": { - self.PRIMARY_KEYS: {"companyId"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"property_hs_lastmodifieddate"}, - self.EXPECTED_PAGE_SIZE: 250, - self.OBEYS_START_DATE: True - }, - "contact_lists": { - self.PRIMARY_KEYS: {"listId"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"updatedAt"}, - self.EXPECTED_PAGE_SIZE: 250, - self.OBEYS_START_DATE: True - }, - "contacts": { - self.PRIMARY_KEYS: {"vid"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"versionTimestamp"}, - self.EXPECTED_PAGE_SIZE: 100, - self.OBEYS_START_DATE: True - }, - "contacts_by_company": { - self.PRIMARY_KEYS: {"company-id", "contact-id"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.EXPECTED_PAGE_SIZE: 100, - self.OBEYS_START_DATE: True, - self.PARENT_STREAM: 'companies' - }, - "deal_pipelines": { - self.PRIMARY_KEYS: {"pipelineId"}, - self.REPLICATION_METHOD: self.FULL, - self.OBEYS_START_DATE: False, - }, - "deals": { - self.PRIMARY_KEYS: {"dealId"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"property_hs_lastmodifieddate"}, - self.OBEYS_START_DATE: True - }, - "email_events": { - self.PRIMARY_KEYS: {"id"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"startTimestamp"}, - self.EXPECTED_PAGE_SIZE: 1000, - self.OBEYS_START_DATE: True - }, - "engagements": { - self.PRIMARY_KEYS: {"engagement_id"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"lastUpdated"}, - self.EXPECTED_PAGE_SIZE: 250, - self.OBEYS_START_DATE: True - }, - "forms": { - self.PRIMARY_KEYS: {"guid"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"updatedAt"}, - self.OBEYS_START_DATE: True - }, - "owners": { - self.PRIMARY_KEYS: {"ownerId"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"updatedAt"}, - self.OBEYS_START_DATE: True # TODO is this a BUG? 
- }, - "subscription_changes": { - self.PRIMARY_KEYS: {"timestamp", "portalId", "recipient"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"startTimestamp"}, - self.EXPECTED_PAGE_SIZE: 1000, - self.OBEYS_START_DATE: True - }, - "workflows": { - self.PRIMARY_KEYS: {"id"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"updatedAt"}, - self.OBEYS_START_DATE: True - }, - "tickets": { - self.PRIMARY_KEYS: {"id"}, - self.REPLICATION_METHOD: self.INCREMENTAL, - self.REPLICATION_KEYS: {"updatedAt"}, - self.EXPECTED_PAGE_SIZE: 100, - self.OBEYS_START_DATE: True - } - } - - ############################# - # Common Metadata Methods # - ############################# - - def expected_primary_keys(self): - """ - return a dictionary with key of table name - and value as a set of primary key fields - """ - return {table: properties.get(self.PRIMARY_KEYS, set()) - for table, properties - in self.expected_metadata().items()} - - - def expected_automatic_fields(self): - """ - return a dictionary with key of table name and value as the primary keys and replication keys - """ - pks = self.expected_primary_keys() - rks = self.expected_replication_keys() - - return {stream: rks.get(stream, set()) | pks.get(stream, set()) - for stream in self.expected_streams()} - - - def expected_replication_method(self): - """return a dictionary with key of table name and value of replication method""" - return {table: properties.get(self.REPLICATION_METHOD, None) - for table, properties - in self.expected_metadata().items()} - - def expected_streams(self): - """A set of expected stream names""" - return set(self.expected_metadata().keys()) - - def expected_replication_keys(self): - """ - return a dictionary with key of table name - and value as a set of replication key fields - """ - return {table: properties.get(self.REPLICATION_KEYS, set()) - for table, properties - in self.expected_metadata().items()} - - def expected_page_limits(self): - return {table: properties.get(self.EXPECTED_PAGE_SIZE, set()) - for table, properties - in self.expected_metadata().items()} - - def expected_primary_keys(self): - - """ - return a dictionary with key of table name - and value as a set of primary key fields - """ - return {table: properties.get(self.PRIMARY_KEYS, set()) - for table, properties - in self.expected_metadata().items()} - - def expected_automatic_fields(self): - auto_fields = {} - for k, v in self.expected_metadata().items(): - auto_fields[k] = v.get(self.PRIMARY_KEYS, set()) | v.get(self.REPLICATION_KEYS, set()) - return auto_fields - - ########################## - # Common Test Actions # - ########################## - - def create_connection_and_run_check(self, original_properties: bool = True): - """Create a new connection with the test name""" - # Create the connection - conn_id = connections.ensure_connection(self, original_properties) - - # Run a check job using orchestrator (discovery) - check_job_name = runner.run_check_mode(self, conn_id) - - # Assert that the check job succeeded - exit_status = menagerie.get_exit_status(conn_id, check_job_name) - menagerie.verify_check_exit_status(self, exit_status, check_job_name) - return conn_id - - def run_and_verify_check_mode(self, conn_id): - """ - Run the tap in check mode and verify it succeeds. - This should be ran prior to field selection and initial sync. - - Return the connection id and found catalogs from menagerie. 
- """ - # run in check mode - check_job_name = runner.run_check_mode(self, conn_id) - - # verify check exit codes - exit_status = menagerie.get_exit_status(conn_id, check_job_name) - menagerie.verify_check_exit_status(self, exit_status, check_job_name) - - found_catalogs = menagerie.get_catalogs(conn_id) - self.assertGreater(len(found_catalogs), 0, msg="unable to locate schemas for connection {}".format(conn_id)) - - found_catalog_names = set(map(lambda c: c['tap_stream_id'], found_catalogs)) - self.assertSetEqual(self.expected_check_streams(), found_catalog_names, - msg="discovered schemas do not match") - LOGGER.info("discovered schemas are OK") - - return found_catalogs - - def run_and_verify_sync(self, conn_id): - """ - Run a sync job and make sure it exited properly. - Return a dictionary with keys of streams synced - and values of records synced for each stream - """ - # Run a sync job using orchestrator - sync_job_name = runner.run_sync_mode(self, conn_id) - - # Verify tap and target exit codes - exit_status = menagerie.get_exit_status(conn_id, sync_job_name) - menagerie.verify_sync_exit_status(self, exit_status, sync_job_name) - - # Verify actual rows were synced - sync_record_count = runner.examine_target_output_file(self, - conn_id, - self.expected_streams(), - self.expected_primary_keys()) - total_row_count = sum(sync_record_count.values()) - self.assertGreater(total_row_count, 0, - msg="failed to replicate any data: {}".format(sync_record_count)) - LOGGER.info("total replicated row count: %s", total_row_count) - - return sync_record_count - - def perform_and_verify_table_and_field_selection(self, - conn_id, - test_catalogs, - select_all_fields=True): - """ - Perform table and field selection based off of the streams to select - set and field selection parameters. - - Verify this results in the expected streams selected and all or no - fields selected for those streams. 
- """ - - # Select all available fields or select no fields from all testable streams - self.select_all_streams_and_fields( - conn_id=conn_id, catalogs=test_catalogs, select_all_fields=select_all_fields - ) - - catalogs = menagerie.get_catalogs(conn_id) - - # Ensure our selection affects the catalog - expected_selected = [tc.get('tap_stream_id') for tc in test_catalogs] - for cat in catalogs: - catalog_entry = menagerie.get_annotated_schema(conn_id, cat['stream_id']) - - # Verify all testable streams are selected - selected = catalog_entry.get('annotated-schema').get('selected') - LOGGER.info("Validating selection on %s: %s", cat['stream_name'], selected) - if cat['stream_name'] not in expected_selected: - self.assertFalse(selected, msg="Stream selected, but not testable.") - continue # Skip remaining assertions if we aren't selecting this stream - self.assertTrue(selected, msg="Stream not selected.") - - if select_all_fields: - # Verify all fields within each selected stream are selected - for field, field_props in catalog_entry.get('annotated-schema').get('properties').items(): - field_selected = field_props.get('selected') - LOGGER.info("\tValidating selection on %s.%s: %s", - cat['stream_name'], field, field_selected) - self.assertTrue(field_selected, msg="Field not selected.") - else: - # Verify only automatic fields are selected - expected_automatic_fields = self.expected_automatic_fields().get(cat['tap_stream_id']) - selected_fields = self.get_selected_fields_from_metadata(catalog_entry['metadata']) - self.assertEqual(expected_automatic_fields, selected_fields) - - @staticmethod - def get_selected_fields_from_metadata(metadata): - selected_fields = set() - for field in metadata: - is_field_metadata = len(field['breadcrumb']) > 1 - inclusion_automatic_or_selected = (field['metadata'].get('inclusion') == 'automatic' - or field['metadata'].get('selected') is True) - if is_field_metadata and inclusion_automatic_or_selected: - selected_fields.add(field['breadcrumb'][1]) - return selected_fields - - @staticmethod - def select_all_streams_and_fields(conn_id, catalogs, select_all_fields: bool = True): - """Select all streams and all fields within streams""" - for catalog in catalogs: - schema = menagerie.get_annotated_schema(conn_id, catalog['stream_id']) - - non_selected_properties = [] - if not select_all_fields: - # get a list of all properties so that none are selected - non_selected_properties = schema.get('annotated-schema', {}).get( - 'properties', {}).keys() - - connections.select_catalog_and_fields_via_metadata( - conn_id, catalog, schema, [], non_selected_properties) - - def timedelta_formatted(self, dtime, days=0, str_format="%Y-%m-%dT00:00:00Z"): - date_stripped = dt.strptime(dtime, str_format) - return_date = date_stripped + timedelta(days=days) - - return dt.strftime(return_date, str_format) - - ################################ - # Tap Specific Test Actions # - ################################ - - def datetime_from_timestamp(self, value, str_format="%Y-%m-%dT00:00:00Z"): - """ - Takes in a unix timestamp in milliseconds. - Returns a string formatted python datetime - """ - try: - datetime_value = dt.fromtimestamp(value) - datetime_str = dt.strftime(datetime_value, str_format) - except ValueError as err: - raise NotImplementedError( - f"Invalid argument 'value': {value} " - "This method was designed to accept unix timestamps in milliseconds." 
- ) - return datetime_str - - def is_child(self, stream): - """return true if this stream is a child stream""" - return self.expected_metadata()[stream].get(self.PARENT_STREAM) is not None diff --git a/archive/tests/client.py b/archive/tests/client.py deleted file mode 100644 index 923a191..0000000 --- a/archive/tests/client.py +++ /dev/null @@ -1,1679 +0,0 @@ -import datetime -import random -import uuid - -import backoff -import requests -from base import HubspotBaseTest -from tap_tester import LOGGER - -DEBUG = False -BASE_URL = "https://api.hubapi.com" - - -class TestClient(): - START_DATE_FORMAT = "%Y-%m-%dT00:00:00Z" - V3_DEALS_PROPERTY_PREFIXES = {'hs_date_entered', 'hs_date_exited', 'hs_time_in'} - BOOKMARK_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' - - ########################################################################## - ### CORE METHODS - ########################################################################## - - def giveup(exc): - """Checks a response status code, returns True if unsuccessful unless rate limited.""" - if exc.response.status_code == 429: - return False - - return exc.response is not None \ - and 400 <= exc.response.status_code < 500 - - @backoff.on_exception(backoff.constant, - (requests.exceptions.RequestException, - requests.exceptions.HTTPError), - max_tries=5, - jitter=None, - giveup=giveup, - interval=10) - def get(self, url, params=dict()): - """Perform a GET using the standard requests method and log the action""" - response = requests.get(url, params=params, headers=self.HEADERS) - LOGGER.info(f"TEST CLIENT | GET {url} params={params} STATUS: {response.status_code}") - response.raise_for_status() - json_response = response.json() - - return json_response - - @backoff.on_exception(backoff.constant, - (requests.exceptions.RequestException, - requests.exceptions.HTTPError), - max_tries=5, - jitter=None, - giveup=giveup, - interval=10) - def post(self, url, data=dict(), params=dict(), debug=DEBUG): - """Perform a POST using the standard requests method and log the action""" - - headers = dict(self.HEADERS) - headers['content-type'] = "application/json" - response = requests.post(url, json=data, params=params, headers=headers) - LOGGER.info( - f"TEST CLIENT | POST {url} data={data} params={params} STATUS: {response.status_code}") - if debug: - LOGGER.debug(response.text) - - response.raise_for_status() - - if response.status_code == 204: - LOGGER.warn(f"TEST CLIENT Response is empty") - # NB: There is a simplejson.scanner.JSONDecodeError thrown when we attempt - # to do a response.json() on a 204 response. To get around this we just return an empty list - # as we assume that a 204 will not have a body. A better implementation would be to catch the - # decode error; however, we were not able to get that approach working.
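            # A possible alternative (sketch only, not what this client does): let json()
            # raise, and catch the decode error instead of special-casing 204 up front:
            #     try:
            #         return response.json()
            #     except ValueError:  # simplejson's JSONDecodeError subclasses ValueError
            #         return []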
- return [] - - json_response = response.json() - return json_response - - @backoff.on_exception(backoff.constant, - (requests.exceptions.RequestException, - requests.exceptions.HTTPError), - max_tries=5, - jitter=None, - giveup=giveup, - interval=10) - def put(self, url, data, params=dict(), debug=DEBUG): - """Perform a PUT using the standard requests method and log the action""" - headers = dict(self.HEADERS) - headers['content-type'] = "application/json" - response = requests.put(url, json=data, params=params, headers=headers) - LOGGER.info( - f"TEST CLIENT | PUT {url} data={data} params={params} STATUS: {response.status_code}") - if debug: - LOGGER.debug(response.text) - - response.raise_for_status() - - @backoff.on_exception(backoff.constant, - (requests.exceptions.RequestException, - requests.exceptions.HTTPError), - max_tries=5, - jitter=None, - giveup=giveup, - interval=10) - def patch(self, url, data, params=dict(), debug=DEBUG): - """Perform a PATCH using the standard requests method and log the action""" - headers = dict(self.HEADERS) - headers['content-type'] = "application/json" - response = requests.patch(url, json=data, params=params, headers=headers) - LOGGER.info( - f"TEST CLIENT | PATCH {url} data={data} params={params} STATUS: {response.status_code}") - if debug: - LOGGER.debug(response.text) - - response.raise_for_status() - - @backoff.on_exception(backoff.constant, - (requests.exceptions.RequestException, - requests.exceptions.HTTPError), - max_tries=5, - jitter=None, - giveup=giveup, - interval=10) - def delete(self, url, params=dict(), debug=DEBUG): - """Perform a DELETE using the standard requests method and log the action""" - - headers = dict(self.HEADERS) - headers['content-type'] = "application/json" - response = requests.delete(url, params=params, headers=headers) - LOGGER.info(f"TEST CLIENT | DELETE {url} params={params} STATUS: {response.status_code}") - if debug: - LOGGER.debug(response.text) - response.raise_for_status() - - def denest_properties(self, stream, records): - """ - Takes a list of records and checks each for a 'properties' key to denest. - Returns the list of denested records. - """ - for record in records: - if record.get('properties'): - for property_key, property_value in record['properties'].items(): - - if isinstance(property_value, dict): - # if any property has a versions object track it by the top level key 'properties_versions' - if property_value.get('versions'): - if not record.get('properties_versions'): - record['properties_versions'] = [] - record['properties_versions'] += property_value['versions'] - - # denest each property to be a top level key - record[f'property_{property_key}'] = property_value - - LOGGER.info(f"TEST CLIENT | Transforming (denesting) {len(records)} {stream} records") - return records - - def datatype_transformations(self, stream, records): - """ - Takes a list of records and converts any known epoch-millisecond datetime columns - into formatted datetime strings. - Returns the list of transformed records.
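For example (illustrative values): an owners record holding 'createdAt': 1614556800000 becomes 'createdAt': '2021-03-01T00:00:00.000000Z' once the epoch-milliseconds value is divided by 1000 and formatted with BOOKMARK_DATE_FORMAT (assuming a UTC clock, since the underlying fromtimestamp call is timezone-local).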
- """ - datetime_columns = { - 'owners': {'createdAt', 'updatedAt'}, - } - if stream in datetime_columns.keys(): - for record in records: - for column in record.keys(): - if column in datetime_columns[stream]: - record[column] = self.BaseTest.datetime_from_timestamp( - record[column] / 1000, self.BOOKMARK_DATE_FORMAT - ) - - LOGGER.info( - f"TEST CLIENT | Transforming (datatype conversions) {len(records)} {stream} records") - return records - - ########################################################################## - ### GET - ########################################################################## - def read(self, stream, parent_ids=[], since=''): - - # Resets the access_token if the expiry time is less than or equal to the current time - if self.CONFIG["token_expires"] <= datetime.datetime.utcnow(): - self.acquire_access_token_from_refresh_token() - - if stream == 'forms': - return self.get_forms() - elif stream == 'owners': - return self.get_owners() - elif stream == 'companies': - return self.get_companies(since) - elif stream == 'contact_lists': - return self.get_contact_lists(since) - elif stream == 'contacts_by_company': - return self.get_contacts_by_company(parent_ids) - elif stream == 'engagements': - return self.get_engagements() - elif stream == 'campaigns': - return self.get_campaigns() - elif stream == 'deals': - return self.get_deals() - elif stream == 'workflows': - return self.get_workflows() - elif stream == 'contacts': - return self.get_contacts() - elif stream == 'deal_pipelines': - return self.get_deal_pipelines() - elif stream == 'email_events': - return self.get_email_events() - elif stream == 'subscription_changes': - return self.get_subscription_changes(since) - elif stream == "tickets": - return self.get_tickets() - else: - raise NotImplementedError - - def get_campaigns(self): - """ - Get all campaigns by id, then grab the details of each campaign. - """ - campaign_by_id_url = f"{BASE_URL}/email/public/v1/campaigns/by-id" - campaign_url = f"{BASE_URL}/email/public/v1/campaigns/" - - # get all campaigns by-id - response = self.get(campaign_by_id_url) - campaign_ids = [campaign['id'] for campaign in response['campaigns']] - - # get the detailed record corresponding to each campagin-id - records = [] - for campaign_id in campaign_ids: - url = f"{campaign_url}{campaign_id}" - response = self.get(url) - records.append(response) - - return records - - def _get_company_by_id(self, company_id): - url = f"{BASE_URL}/companies/v2/companies/{company_id}" - response = self.get(url) - return response - - def get_companies(self, since=''): - """ - Get all companies by paginating using 'hasMore' and 'offset'. 
- """ - url = f"{BASE_URL}/companies/v2/companies/paged" - if not since: - since = self.start_date_strf - - if not isinstance(since, datetime.datetime): - since = datetime.datetime.strptime(since, self.START_DATE_FORMAT) - params = {'properties': ["createdate", "hs_lastmodifieddate"]} - records = [] - - # paginating through all the companies - companies = [] - has_more = True - while has_more: - - response = self.get(url, params=params) - - for company in response['companies']: - if company['properties']['hs_lastmodifieddate']: - company_timestamp = datetime.datetime.fromtimestamp( - company['properties']['hs_lastmodifieddate']['timestamp'] / 1000 - ) - else: - company_timestamp = datetime.datetime.fromtimestamp( - company['properties']['createdate']['timestamp'] / 1000 - ) - - if company_timestamp >= since: - companies.append(company) - - has_more = response['has-more'] - params['offset'] = response['offset'] - - # get the details of each company - for company in companies: - response = self._get_company_by_id(company['companyId']) - records.append(response) - - records = self.denest_properties('companies', records) - - return records - - def get_contact_lists(self, since='', list_id=''): - """ - Get all contact_lists by paginating using 'has-more' and 'offset'. - """ - url = f"{BASE_URL}/contacts/v1/lists" - - if list_id: - url += f"/{list_id}" - response = self.get(url) - - return response - - if since == 'all': - params = {'count': 250} - else: - if not since: - since = self.start_date_strf - - if not isinstance(since, datetime.datetime): - since = datetime.datetime.strptime(since, self.START_DATE_FORMAT) - - since = str(since.timestamp() * 1000).split(".")[0] - params = {'since': since, 'count': 250} - - records = [] - replication_key = list(self.replication_keys['contact_lists'])[0] - - # paginating through allxo the contact_lists - has_more = True - while has_more: - - response = self.get(url, params=params) - for record in response['lists']: - - if since == 'all' or int(since) <= record[replication_key]: - records.append(record) - - has_more = response['has-more'] - params['offset'] = response['offset'] - - return records - - def _get_contacts_by_pks(self, pks): - """ - Get a specific contact by using the primary key value. - - :params pks: vids - :return: the contacts record - """ - url_2 = f"{BASE_URL}/contacts/v1/contact/vids/batch/" - params_2 = { - 'showListMemberships': True, - 'formSubmissionMode': "all", - } - records = [] - # get the detailed contacts records by vids - params_2['vid'] = pks - response_2 = self.get(url_2, params=params_2) - for vid, record in response_2.items(): - ts_ms = int(record['properties']['lastmodifieddate']['value']) / 1000 - converted_ts = self.BaseTest.datetime_from_timestamp( - ts_ms, self.BOOKMARK_DATE_FORMAT - ) - record['versionTimestamp'] = converted_ts - - records.append(record) - - records = self.denest_properties('contacts', records) - - return records[0] - - def get_contacts(self): - """ - Get all contact vids by paginating using 'has-more' and 'vid-offset/vidOffset'. - Then use the vids to grab the detailed contacts records. 
- """ - url_1 = f"{BASE_URL}/contacts/v1/lists/all/contacts/all" - params_1 = { - 'showListMemberships': True, - 'includeVersion': True, - 'count': 100, - } - vids = [] - url_2 = f"{BASE_URL}/contacts/v1/contact/vids/batch/" - params_2 = { - 'showListMemberships': True, - 'formSubmissionMode': "all", - } - records = [] - - has_more = True - while has_more: - # get a page worth of contacts and pull the vids - response_1 = self.get(url_1, params=params_1) - vids = [record['vid'] for record in response_1['contacts'] - if record['versionTimestamp'] >= self.start_date] - has_more = response_1['has-more'] - params_1['vidOffset'] = response_1['vid-offset'] - - # get the detailed contacts records by vids - params_2['vid'] = vids - response_2 = self.get(url_2, params=params_2) - records.extend([record for record in response_2.values()]) - - records = self.denest_properties('contacts', records) - return records - - def get_contacts_by_company(self, parent_ids): - """ - Get all contacts_by_company iterating over compnayId's and - paginating using 'hasMore' and 'vidOffset'. This stream is essentially - a join on contacts and companies. - NB: This stream is a CHILD of 'companies'. If any test needs to pull expected - data from this endpoint, it requires getting all 'companies' data and then - pulling the 'companyId' from each record to perform the corresponding get here. - """ - - url = f"{BASE_URL}/companies/v2/companies/{{}}/vids" - params = dict() - records = [] - - for parent_id in parent_ids: - child_url = url.format(parent_id) - has_more = True - while has_more: - - response = self.get(child_url, params=params) - for vid in response.get('vids', {}): - records.extend([{'company-id': parent_id, - 'contact-id': vid}]) - - has_more = response['hasMore'] - params['vidOffset'] = response['vidOffset'] - - params = dict() - - return records - - def get_deal_pipelines(self): - """ - Get all deal_pipelines. - """ - url = f"{BASE_URL}/deals/v1/pipelines" - records = [] - - response = self.get(url) - records.extend(response) - - records = self.denest_properties('deal_pipelines', records) - return records - - def _get_deals_by_pk(self, deal_id): - url = f"{BASE_URL}/deals/v1/deal/{deal_id}" - params = {'includeAllProperties': True} - response = self.get(url, params=params) - - return response - - def get_deals(self): - """ - Get all deals from the v1 endpoiint by paginating using 'hasMore' and 'offset'. - For each deals record denest 'properties' so that they are prefxed with 'property_' - and located at the top level. - """ - v1_url = f"{BASE_URL}/deals/v1/deal/paged" - - v1_params = {'includeAllProperties': True, - 'allPropertiesFetchMode': 'latest_version', - 'properties': []} - replication_key = list(self.replication_keys['deals'])[0] - records = [] - - # hit the v1 endpoint to get the record - has_more = True - while has_more: - response = self.get(v1_url, params=v1_params) - records.extend([record for record in response['deals'] - # Here replication key of the deals stream is derived from "hs_lastmodifieddate" field. 
- if record['properties']["hs_lastmodifieddate"][
- 'timestamp'] >= self.start_date])
- has_more = response['hasMore']
- v1_params['offset'] = response['offset']
-
- # batch the v1 response ids into groups of 100
- v1_ids = [{'id': str(record['dealId'])} for record in records]
- batches = []
- batch_size = 100
- for i in range(0, len(v1_ids), batch_size):
- batches.append(v1_ids[i:i + batch_size])
-
- # hit the v3 endpoint to get the special hs_ fields from v3 'properties'
- v3_url = f"{BASE_URL}/crm/v3/objects/deals/batch/read"
- v3_property = ['hs_date_entered_appointmentscheduled']
- v3_records = []
- for batch in batches:
- data = {'inputs': batch,
- 'properties': v3_property}
- v3_response = self.post(v3_url, data)
- v3_records += v3_response['results']
-
- # pull the desired properties from the v3 records and add them to the corresponding v1 records
- for v3_record in v3_records:
- for record in records:
- if v3_record['id'] == str(record['dealId']):
- # don't include the v3 property if the value is None
- non_null_v3_properties = {v3_property_key: v3_property_value
- for v3_property_key, v3_property_value in
- v3_record['properties'].items()
- if v3_property_value is not None}
-
- # only grab v3 properties with a specific prefix
- trimmed_v3_properties = {v3_property_key: v3_property_value
- for v3_property_key, v3_property_value in
- non_null_v3_properties.items()
- if any([v3_property_key.startswith(prefix)
- for prefix in
- self.V3_DEALS_PROPERTY_PREFIXES])}
-
- # the v3 properties must be restructured into objects to match v1
- v3_properties = {v3_property_key: {'value': v3_property_value}
- for v3_property_key, v3_property_value in
- trimmed_v3_properties.items()}
-
- # add the v3 record properties to the v1 record
- record['properties'].update(v3_properties)
-
- records = self.denest_properties('deals', records)
- return records
-
- def get_email_events(self, recipient=''):
- """
- Get all email_events by paginating using 'hasMore' and 'offset'.
- """
- url = f"{BASE_URL}/email/public/v1/events"
- replication_key = list(self.replication_keys['email_events'])[0]
- params = dict()
- if recipient:
- params['recipient'] = recipient
- records = []
-
- has_more = True
- while has_more:
- response = self.get(url, params=params)
-
- records.extend([record for record in response['events']
- if record['created'] >= self.start_date])
-
- has_more = response['hasMore']
- params['offset'] = response['offset']
-
- return records
-
- def _get_engagements_by_pk(self, engagement_id):
- """
- Get a specific engagement record using its id
- """
- url = f"{BASE_URL}/engagements/v1/engagements/{engagement_id}"
-
- response = self.get(url)
-
- # added by tap
- response['engagement_id'] = response['engagement']['id']
- response['lastUpdated'] = response['engagement']['lastUpdated']
-
- return response
-
- def get_engagements(self):
- """
- Get all engagements by paginating using 'hasMore' and 'offset'.
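-
- Sketch of the loop below (names as used in the code):
-     while has_more:
-         response = self.get(url, params={'limit': 250, 'offset': ...})
-         # keep results whose engagement[replication_key] >= self.start_date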
- """ - url = f"{BASE_URL}/engagements/v1/engagements/paged" - replication_key = list(self.replication_keys['engagements'])[0] - params = {'limit': 250} - records = [] - - has_more = True - while has_more: - - response = self.get(url, params=params) - for result in response['results']: - if result['engagement'][replication_key] >= self.start_date: - result['engagement_id'] = result['engagement']['id'] - result['lastUpdated'] = result['engagement']['lastUpdated'] - records.append(result) - - has_more = response['hasMore'] - params['offset'] = response['offset'] - - return records - - def _get_forms_by_pk(self, form_id): - """ - Get a specific forms record using the 'form_guid'. - :params form_id: the 'form_guid' value - """ - url = f"{BASE_URL}/forms/v2/forms/{form_id}" - response = self.get(url) - - return response - - def get_forms(self): - """ - Get all forms. - """ - url = f"{BASE_URL}/forms/v2/forms" - replication_key = list(self.replication_keys['forms'])[0] - records = [] - - response = self.get(url) - records.extend([record for record in response - if record[replication_key] >= self.start_date]) - - return records - - def get_owners(self): - """ - Get all owners. - """ - url = f"{BASE_URL}/owners/v2/owners" - records = self.get(url) - transformed_records = self.datatype_transformations('owners', records) - return transformed_records - - def get_subscription_changes(self, since=''): - """ - Get all subscription_changes from 'since' date by paginating using 'hasMore' and 'offset'. - Default since date is one week ago - """ - url = f"{BASE_URL}/email/public/v1/subscriptions/timeline" - params = dict() - records = [] - replication_key = list(self.replication_keys['subscription_changes'])[0] - if not since: - since = self.start_date_strf - - if not isinstance(since, datetime.datetime): - since = datetime.datetime.strptime(since, self.START_DATE_FORMAT) - since = str(since.timestamp() * 1000).split(".")[0] - # copied overparams = {'properties': ["createdate", "hs_lastmodifieddate"]} - has_more = True - while has_more: - response = self.get(url, params=params) - has_more = response['hasMore'] - params['offset'] = response['offset'] - for record in response['timeline']: - # Future Testing TDL-16166 | Investigate difference between timestamp and startTimestamp - # this won't be feasible until BUG_TDL-14938 is addressed - if int(since) <= record['timestamp']: - records.append(record) - - return records - - def _get_workflows_by_pk(self, workflow_id=''): - """Get a specific workflow by pk value""" - url = f"{BASE_URL}/automation/v3/workflows/{workflow_id}" - - response = self.get(url) - - return response - - def get_workflows(self): - """ - Get all workflows. - """ - url = f"{BASE_URL}/automation/v3/workflows/" - replication_key = list(self.replication_keys['workflows'])[0] - records = [] - - response = self.get(url) - - records.extend([record for record in response['workflows'] - if record[replication_key] >= self.start_date]) - return records - - def _get_tickets_by_pk(self, ticket_id): - """ - Get a specific ticket by pk value - HubSpot API https://developers.hubspot.com/docs/api/crm/tickets - """ - url = f"{BASE_URL}/crm/v4/objects/tickets/{ticket_id}?associations=contact,company,deals" - response = self.get(url) - return response - - def get_tickets_properties(self): - """ - Get tickets properties. 
- HubSpot API https://developers.hubspot.com/docs/api/crm/tickets
- """
- url = f"{BASE_URL}/crm/v3/properties/tickets"
- records = self.get(url)
-
- return ",".join([record["name"] for record in records["results"]])
-
- def get_tickets(self):
- """
- Get all tickets.
- HubSpot API https://developers.hubspot.com/docs/api/crm/tickets
- """
- url = f"{BASE_URL}/crm/v4/objects/tickets"
- replication_key = list(self.replication_keys["tickets"])[0]
- records = []
-
- params = {"limit": 100, "associations": "contact,company,deals", 'properties': self.get_tickets_properties()}
- while True:
- response = self.get(url, params=params)
-
- records.extend([record
- for record in response["results"]
- if record[replication_key] >= self.start_date_strf.replace('.Z', '.000Z')])
-
- if not response.get("paging"):
- break
- params["after"] = response.get("paging").get("next").get("after")
-
- records = self.denest_properties('tickets', records)
- return records
-
- ##########################################################################
- ### CREATE
- ##########################################################################
-
- def create(self, stream, company_ids=[], subscriptions=[], times=1):
- """Dispatch create to make tests clean."""
-
- # Resets the access_token if the expiry time is less than or equal to the current time
- if self.CONFIG["token_expires"] <= datetime.datetime.utcnow():
- self.acquire_access_token_from_refresh_token()
-
- if stream == 'forms':
- return self.create_forms()
- elif stream == 'owners':
- return self.create_owners()
- elif stream == 'companies':
- return self.create_companies()
- elif stream == 'contact_lists':
- return self.create_contact_lists()
- elif stream == 'contacts_by_company':
- return self.create_contacts_by_company(company_ids, times=times)
- elif stream == 'engagements':
- return self.create_engagements()
- elif stream == 'campaigns':
- return self.create_campaigns()
- elif stream == 'deals':
- return self.create_deals()
- elif stream == 'workflows':
- return self.create_workflows()
- elif stream == 'contacts':
- return self.create_contacts()
- elif stream == 'deal_pipelines':
- return self.create_deal_pipelines()
- elif stream == 'email_events':
- LOGGER.warn(
- f"TEST CLIENT | Calling the create_subscription_changes method to generate {stream} records"
- )
- return self.create_subscription_changes()
- elif stream == 'subscription_changes':
- return self.create_subscription_changes(subscriptions, times)
- elif stream == 'tickets':
- return self.create_tickets()
- else:
- raise NotImplementedError(f"There is no create_{stream} method in this dispatch!")
-
- def create_contacts(self):
- """
- Generate a single contacts record.
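- Typical use from a test (sketch):
-     client = TestClient(start_date)
-     record = client.create('contacts')[0]   # a denested record with 'versionTimestamp' added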
- Hubspot API https://legacydocs.hubspot.com/docs/methods/contacts/create_contact - """ - record_uuid = str(uuid.uuid4()).replace('-', '') - - url = f"{BASE_URL}/contacts/v1/contact" - data = { - "properties": [ - { - "property": "email", - "value": f"{record_uuid}@stitchdata.com" - }, - { - "property": "firstname", - "value": "Yusaku" - }, - { - "property": "lastname", - "value": "Kasahara" - }, - { - "property": "website", - "value": "http://app.stitchdata.com" - }, - { - "property": "phone", - "value": "555-122-2323" - }, - { - "property": "address", - "value": "25 First Street" - }, - { - "property": "city", - "value": "Cambridge" - }, - { - "property": "state", - "value": "MA" - }, - { - "property": "zip", - "value": "02139" - } - ] - } - - # generate a contacts record - response = self.post(url, data) - records = [response] - - get_url = f"{BASE_URL}/contacts/v1/contact/vid/{response['vid']}/profile" - params = {'includeVersion': True} - get_resp = self.get(get_url, params=params) - - converted_versionTimestamp = self.BaseTest.datetime_from_timestamp( - get_resp['versionTimestamp'] / 1000, self.BOOKMARK_DATE_FORMAT - ) - get_resp['versionTimestamp'] = converted_versionTimestamp - records = self.denest_properties('contacts', [get_resp]) - - return records - - def create_campaigns(self): - """ - Couldn't find endpoint... - """ - # record_uuid = str(uuid.uuid4()).replace('-', '') - - # url = f"{BASE_URL}" - # data = {} - # generate a record - # response = self.post(url, data) - # records = [response] - # return records - raise NotImplementedError("No endpoint available in hubspot api.") - - def create_companies(self): - """ - It takes about 6 seconds after the POST for the created record to be caught by the next GET. - This is intended for generating one record for companies. - HubSpot API https://legacydocs.hubspot.com/docs/methods/companies/create_company - """ - record_uuid = str(uuid.uuid4()).replace('-', '') - - url = f"{BASE_URL}/companies/v2/companies/" - data = {"properties": [{"name": "name", "value": f"Company Name {record_uuid}"}, - {"name": "description", "value": "company description"}]} - - # generate a record - response = self.post(url, data) - records = [response] - return records - - def create_contact_lists(self): - """ - HubSpot API https://legacydocs.hubspot.com/docs/methods/lists/create_list - - NB: This generates a list based on a 'twitterhandle' filter. There are many - different filters, but at the time of implementation it did not seem that - using different filters would result in any new fields. 
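-
- The filter payload below is a list of filter groups, each holding one or more
- conditions, e.g. (shape as used in the data dict that follows):
-     "filters": [[{"operator": "EQ", "property": "twitterhandle", "type": "string", "value": ...}]]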
- """ - record_uuid = str(uuid.uuid4()).replace('-', '') - - url = f"{BASE_URL}/contacts/v1/lists/" - data = { - "name": f"tweeters{record_uuid}", - "dynamic": True, - "filters": [ - [{ - "operator": "EQ", - "value": f"@hubspot{record_uuid}", - "property": "twitterhandle", - "type": "string" - }] - ] - } - # generate a record - response = self.post(url, data) - records = [response] - return records - - def create_contacts_by_company(self, company_ids=[], contact_records=[], times=1): - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/associate-objects - """ - url = f"{BASE_URL}/crm-associations/v1/associations" - if not company_ids: - company_ids = [company['companyId'] for company in self.get_companies()] - if not contact_records: - contact_records = self.get_contacts() - - records = [] - for _ in range(times): - for company_id in set(company_ids): - for contact in contact_records: - # look for a contact that is not already in the contacts_by_company list - if contact['vid'] not in [record['contact-id'] for record in records]: - contact_id = contact['vid'] - data = { - "fromObjectId": company_id, - "toObjectId": contact_id, - "category": "HUBSPOT_DEFINED", - "definitionId": 2 - } - # generate a record - self.put(url, data) - record = {'company-id': company_id, 'contact-id': contact_id} - records.append(record) - break - - if records: - break - - return records - - def create_deal_pipelines(self): - """ - HubSpot API - https://legacydocs.hubspot.com/docs/methods/pipelines/create_new_pipeline - """ - timestamp1 = str(datetime.datetime.now().timestamp()).replace(".", "") - timestamp2 = str(datetime.datetime.now().timestamp()).replace(".", "") - url = f"{BASE_URL}/crm-pipelines/v1/pipelines/deals" - data = { - "pipelineId": timestamp1, - "label": f"API test ticket pipeline {timestamp1}", - "displayOrder": 2, - "active": True, - "stages": [ - { - "stageId": f"example_stage {timestamp1}", - "label": f"Example stage{timestamp1}", - "displayOrder": 1, - "metadata": { - "probability": 0.5 - } - }, - { - "stageId": f"another_example_stage{timestamp2}", - "label": f"Another example stage{timestamp2}", - "displayOrder": 2, - "metadata": { - "probability": 1.0 - } - } - ] - } - - # generate a record - response = self.post(url, data) - records = [response] - return records - - def create_deals(self): - """ - HubSpot API https://legacydocs.hubspot.com/docs/methods/deals/create_deal - - NB: We are currently using the 'default' pipeline and a single stage. This - is intentional so that we do not accidentally use a pipeline that may be deleted. 
- """ - record_uuid = str(uuid.uuid4()).replace('-', '') - - url = f"{BASE_URL}/deals/v1/deal/" - data = { - "associations": { - "associatedCompanyIds": [ - 6804176293 - ], - "associatedVids": [ - 2304 - ] - }, - "properties": [ - { - "value": "Tim's Newer Deal", - "name": "dealname" - }, - { - "value": "appointmentscheduled", - "name": "dealstage" - }, - { - "value": "default", - "name": "pipeline" - }, - { - "value": "98621200", - "name": "hubspot_owner_id" - }, - { - "value": 1409443200000, - "name": "closedate" - }, - { - "value": "60000", - "name": "amount" - }, - { - "value": "newbusiness", - "name": "dealtype" - } - ] - } - - # generate a record - response = self.post(url, data) - records = [response] - return records - - def create_tickets(self): - """ - HubSpot API https://developers.hubspot.com/docs/api/crm/tickets - """ - url = f"{BASE_URL}/crm/v4/objects/tickets" - record_uuid = str(uuid.uuid4()).replace('-', '') - data = { - "properties": { - "content": f"Created for testing purpose - {record_uuid}", - "hs_pipeline": "0", - "hs_pipeline_stage": "1", - "hs_ticket_priority": "MEDIUM", - "subject": f"Sample ticket name - {record_uuid}" - } - } - - # generate a record - response = self.post(url, data) - return [response] - - def create_email_events(self): - """ - HubSpot API https://legacydocs.hubspot.com/docs/methods/email/email_events_overview - - We are able to create email_events by updating email subscription status with a PUT (create_subscription_changes()). - If trying to expand data for other email_events, manually creating data and pinning start_date for a connection is - the preferred approach. We do not currently rely on this approach. - """ - - raise NotImplementedError( - "Use create_subscription_changes instead to create records for email_events stream") - - def create_engagements(self): - """ - HubSpot API https://legacydocs.hubspot.com/docs/methods/engagements/create_engagement - NB: Dependent on valid (currently hardcoded) companyId, and ownerId. 
- THIS IS A POTENTIAL POINT OF INSTABILITY FOR THE TESTS - """ - record_uuid = str(uuid.uuid4()).replace('-', '') - - # gather all contacts and randomly choose one that has not hit the limit - contact_records = self.get_contacts() - contact_ids = [contact['vid'] - for contact in contact_records - if contact['vid'] != 2304] # contact 2304 has hit the 10,000 assoc limit - contact_id = random.choice(contact_ids) - - url = f"{BASE_URL}/engagements/v1/engagements" - data = { - "engagement": { - "active": True, - "ownerId": 98621200, - "type": "NOTE", - "timestamp": 1409172644778 - }, - "associations": { - "contactIds": [contact_id], - "companyIds": [6804176293], - "dealIds": [], - "ownerIds": [], - "ticketIds": [] - }, - "attachments": [ - { - "id": 4241968539 - } - ], - "metadata": { - "body": "note body" - } - } - - # generate a record - response = self.post(url, data) - response['engagement_id'] = response['engagement']['id'] - - records = [response] - return records - - def create_forms(self): - """ - HubSpot API https://legacydocs.hubspot.com/docs/methods/forms/v2/create_form - """ - record_uuid = str(uuid.uuid4()).replace('-', '') - - url = f"{BASE_URL}/forms/v2/forms" - data = { - "name": f"DemoForm{record_uuid}", - "action": "", - "method": "", - "cssClass": "", - "redirect": "", - "submitText": "Submit", - "followUpId": "", - "notifyRecipients": "", - "leadNurturingCampaignId": "", - "formFieldGroups": [ - { - "fields": [ - { - "name": "firstname", - "label": "First Name", - "type": "string", - "fieldType": "text", - "description": "", - "groupName": "", - "displayOrder": 0, - "required": False, - "selectedOptions": [], - "options": [], - "validation": { - "name": "", - "message": "", - "data": "", - "useDefaultBlockList": False - }, - "enabled": True, - "hidden": False, - "defaultValue": "", - "isSmartField": False, - "unselectedLabel": "", - "placeholder": "" - } - ], - "default": True, - "isSmartGroup": False - }, - { - "fields": [ - { - "name": "lastname", - "label": "Last Name", - "type": "string", - "fieldType": "text", - "description": "", - "groupName": "", - "displayOrder": 1, - "required": False, - "selectedOptions": [], - "options": [], - "validation": { - "name": "", - "message": "", - "data": "", - "useDefaultBlockList": False - }, - "enabled": True, - "hidden": False, - "defaultValue": "", - "isSmartField": False, - "unselectedLabel": "", - "placeholder": "" - } - ], - "default": True, - "isSmartGroup": False - }, - # KDS: Removed due to INVALID_FORM_FIELDS error. - # { - # "fields": [ - # { - # "name": "adress_1", - # "label": "Adress 1", - # "type": "string", - # "fieldType": "text", - # "description": "", - # "groupName": "", - # "displayOrder": 2, - # "required": False, - # "selectedOptions": [], - # "options": [], - # "validation": { - # "name": "", - # "message": "", - # "data": "", - # "useDefaultBlockList": False - # }, - # "enabled": True, - # "hidden": False, - # "defaultValue": "", - # "isSmartField": False, - # "unselectedLabel": "", - # "placeholder": "" - # } - # ], - # "default": True, - # "isSmartGroup": False - # } - ], - "performableHtml": "", - "migratedFrom": "ld", - "ignoreCurrentValues": False, - "metaData": [], - "deletable": True - } - - # generate a record - response = self.post(url, data) - records = [response] - return records - - def create_owners(self): - """ - HubSpot API The Owners API is read-only. Owners can only be created in HubSpot. - """ - raise NotImplementedError( - "Only able to create owners from web app manually. 
No API endpoint exists.")
-
- def create_subscription_changes(self, subscriptions=[], times=1):
- """
- HubSpot API https://legacydocs.hubspot.com/docs/methods/email/update_status
-
- NB: This will update email_events as well.
- """
- # by default, new subscription changes are created from the previous subscription changes returned by get_subscription_changes
- if subscriptions == []:
- subscriptions = self.get_subscription_changes()
- subscription_id_list = [[change.get('subscriptionId') for change in subscription['changes']]
- for subscription in subscriptions]
- count = 0
- email_records = []
- subscription_records = []
- LOGGER.info(f"creating {times} records")
-
- for item in subscription_id_list:
- if count < times:
- record_uuid = str(uuid.uuid4()).replace('-', '')
- recipient = record_uuid + "@stitchdata.com"
- url = f"{BASE_URL}/email/public/v1/subscriptions/{recipient}"
- data = {
- "subscriptionStatuses": [
- {
- "id": item[0], # a_sub_id,
- "subscribed": True,
- "optState": "OPT_IN",
- "legalBasis": "PERFORMANCE_OF_CONTRACT",
- "legalBasisExplanation": "We need to send them these emails as part of our agreement with them."
- }
- ]
- }
- # generate a record
- response = self.put(url, data)
-
- # Cleanup this method once BUG_TDL-14938 is addressed
- # The intention is for this method to return both of the objects that it creates with this put
-
- email_event = self.get_email_events(recipient=recipient)
- # subscriptions = self.get_subscription_changes()
- # if len(email_event) > 1 or len(subscription_change) > 1:
- # raise RuntimeError(
- # "Expected this change to generate 1 email_event and 1 subscription_change only. "
- # "Generate {len(email_event)} email_events and {len(subscription_changes)} subscription_changes."
- # )
- email_records.extend(email_event)
- # subscription_records.append(subscription_change)
- count += 1
-
- return email_records # , subscription_records
-
- def create_workflows(self):
- """
- HubSpot API https://legacydocs.hubspot.com/docs/methods/workflows/v3/create_workflow
- """
- record_uuid = str(uuid.uuid4()).replace('-', '')
-
- url = f"{BASE_URL}/automation/v3/workflows"
- data = {
- "name": "Test Workflow",
- "type": "DRIP_DELAY",
- "onlyEnrollsManually": True,
- "enabled": True,
- "actions": [
- {
- "type": "DELAY",
- "delayMillis": 3600000
- },
- {
- "newValue": "HubSpot",
- "propertyName": "company",
- "type": "SET_CONTACT_PROPERTY"
- },
- {
- "type": "WEBHOOK",
- "url": "https://www.myintegration.com/webhook.php",
- "method": "POST",
- "authCreds": {
- "user": "user",
- "password": "password"
- }
- }
- ]
- }
-
- # generate a record
- response = self.post(url, data)
- records = [response]
- return records
-
- ##########################################################################
- ### Updates
- ##########################################################################
-
- def update(self, stream, record_id):
-
- # Resets the access_token if the expiry time is less than or equal to the current time
- if self.CONFIG["token_expires"] <= datetime.datetime.utcnow():
- self.acquire_access_token_from_refresh_token()
-
- if stream == 'companies':
- return self.update_companies(record_id)
- elif stream == 'contacts':
- return self.update_contacts(record_id)
- elif stream == 'contact_lists':
- return self.update_contact_lists(record_id)
- elif stream == 'deal_pipelines':
- return self.update_deal_pipelines(record_id)
- elif stream == 'deals':
- return self.update_deals(record_id)
- elif stream == 'forms':
- return self.update_forms(record_id)
- elif stream == 'engagements':
- return self.update_engagements(record_id)
- elif stream == 'tickets':
- return self.update_tickets(record_id)
- else:
- raise NotImplementedError(f"Test client does not have an update method for {stream}")
-
- def update_workflows(self, workflow_id, contact_email):
- """
- Update a workflow by enrolling a contact in the workflow.
- Hubspot API https://legacydocs.hubspot.com/docs/methods/workflows/add_contact
-
- NB: Attempted to enroll a contact but this did not change anything on the record. Enrollment is handled by
- settings which are fields on a workflows record. The actual contacts' enrollment is not part of this record.
- """
-
- raise NotImplementedError("No endpoint in hubspot api for updating workflows.")
-
- def updated_subscription_changes(self, subscription_id):
- return self.create_subscription_changes(subscription_id)
-
- def update_campaigns(self):
- """
- Couldn't find endpoint...
- """
- raise NotImplementedError("No endpoint for updating campaigns in hubspot api.")
-
- def update_companies(self, company_id):
- """
- Update a company by changing its description
- :param company_id: the primary key value of the company to update
- :return: the updated record using the _get_company_by_id
-
- Hubspot API https://legacydocs.hubspot.com/docs/methods/companies/update_company
- """
- url = f"{BASE_URL}/companies/v2/companies/{company_id}"
-
- record_uuid = str(uuid.uuid4()).replace('-', '')
- data = {
- "properties": [
- {
- "name": "description",
- "value": f"An updated description {record_uuid}"
- }
- ]
- }
- self.put(url, data)
-
- record = self._get_company_by_id(company_id)
-
- return record
-
- def update_contacts(self, vid):
- """
- Update a single contact record with a new email.
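-
- Sketch of the round trip performed below:
-     POST /contacts/v1/contact/vid/{vid}/profile   # write the new property values
-     return self._get_contacts_by_pks(pks=[vid])   # re-fetch the updated record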
- Hubspot API https://legacydocs.hubspot.com/docs/methods/contacts/update_contact
-
- :param vid: the primary key value of the record to update
- :return: the updated record using the _get_contacts_by_pks method
- """
- url = f"{BASE_URL}/contacts/v1/contact/vid/{vid}/profile"
-
- record_uuid = str(uuid.uuid4()).replace('-', '')
- data = {
- "properties": [
- {
- "property": "email",
- "value": f"{record_uuid}@stitchdata.com"
- },
- {
- "property": "firstname",
- "value": "Updated"
- },
- {
- "property": "lastname",
- "value": "Record"
- },
- {
- "property": "lifecyclestage",
- "value": "customer"
- }
- ]
- }
- _ = self.post(url, data=data)
-
- record = self._get_contacts_by_pks(pks=[vid])
-
- return record
-
- def update_contact_lists(self, list_id):
- """
- Update a single contact list.
- Hubspot API https://legacydocs.hubspot.com/docs/methods/lists/update_list
-
- :param list_id: the primary key value of the record to update
- :return: the updated record using the get_contact_lists method
- """
- url = f"{BASE_URL}/contacts/v1/lists/{list_id}"
-
- record_uuid = str(uuid.uuid4()).replace('-', '')
- data = {"name": f"Updated {record_uuid}"}
-
- _ = self.post(url, data=data)
-
- record = self.get_contact_lists(since='', list_id=list_id)
-
- return record
-
- def update_deal_pipelines(self, pipeline_id):
- """
- Update a deal_pipeline record by changing its label.
- :param pipeline_id: the primary key value of the record to update
- :return: the updated record using get_deal_pipelines
- """
- url = f"{BASE_URL}/crm-pipelines/v1/pipelines/deals/{pipeline_id}"
-
- record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
- data = {
- "label": f"Updated {record_uuid}",
- "displayOrder": 4,
- "active": True,
- "stages": [
- {
- "stageId": record_uuid,
- "label": record_uuid,
- "displayOrder": 1,
- "metadata": {
- "probability": 0.5
- }
- },
- ]
- }
-
- _ = self.put(url, data=data)
-
- deal_pipelines = self.get_deal_pipelines()
- record = [pipeline for pipeline in deal_pipelines
- if pipeline['pipelineId'] == pipeline_id][0]
-
- return record
-
- def update_deals(self, deal_id):
- """
- HubSpot API https://legacydocs.hubspot.com/docs/methods/deals/update_deal
-
- :param deal_id: the pk value of the deal record to update
- :return: the updated deal record using a PUT and the results from a GET
- """
- url = f"{BASE_URL}/deals/v1/deal/{deal_id}"
-
- record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
- data = {
- "properties": [
- {
- "value": f"Updated {record_uuid}",
- "name": "dealname"
- },
- ]
- }
-
- # generate a record
- _ = self.put(url, data)
-
- response = self._get_deals_by_pk(deal_id)
-
- return response
-
- def update_forms(self, form_id):
- """
- Hubspot API https://legacydocs.hubspot.com/docs/methods/forms/v2/update_form
-
- :param form_id: the pk value of the form record to update
- :return: the updated form record using the GET endpoint
- """
- url = f"{BASE_URL}/forms/v2/forms/{form_id}"
- record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
-
- data = {
- "name": f"Updated {record_uuid}"
- }
- _ = self.put(url, data=data)
-
- response = self._get_forms_by_pk(form_id)
-
- return response
-
- def update_owners(self):
- """
- HubSpot API The Owners API is read-only. Owners can only be updated in HubSpot.
- """
- raise NotImplementedError(
- "Only able to update owners from web app manually. No API endpoint in hubspot.")
-
- def update_campaigns(self):
- """
- HubSpot API The Campaigns API is read-only. Campaigns can only be updated in HubSpot.
- """
- raise NotImplementedError(
- "Only able to update campaigns from web app manually.
No API endpoint in hubspot.")
-
- def update_engagements(self, engagement_id):
- """
- Hubspot API https://legacydocs.hubspot.com/docs/methods/engagements/update_engagement-patch
- :param engagement_id: the pk value of the engagement record to update
- :return: the updated engagement record using _get_engagements_by_pk
- """
- url = f"{BASE_URL}/engagements/v1/engagements/{engagement_id}"
-
- record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
- data = {
- "metadata": {
- "body": f"Updated {record_uuid}"
- }
- }
-
- self.patch(url, data)
-
- record = self._get_engagements_by_pk(engagement_id)
-
- return record
-
- def update_tickets(self, ticket_id):
- """
- Hubspot API https://developers.hubspot.com/docs/api/crm/tickets
- :param ticket_id: the pk value of the ticket record to update
- :return: the updated ticket record using _get_tickets_by_pk
- """
- url = f"{BASE_URL}/crm/v4/objects/tickets/{ticket_id}"
-
- record_uuid = str(uuid.uuid4()).replace('-', '')[:20]
- data = {
- "properties": {
- "subject": f"update record for testing - {record_uuid}"
- }
- }
-
- self.patch(url, data)
-
- return self._get_tickets_by_pk(ticket_id)
-
- ##########################################################################
- ### Deletes
- ##########################################################################
- def cleanup(self, stream, records, count=10):
-
- # Resets the access_token if the expiry time is less than or equal to the current time
- if self.CONFIG["token_expires"] <= datetime.datetime.utcnow():
- self.acquire_access_token_from_refresh_token()
-
- if stream == 'deal_pipelines':
- self.delete_deal_pipelines(records, count)
- elif stream == 'contact_lists':
- self.delete_contact_lists(records, count)
- else:
- raise NotImplementedError(f"No delete method implemented for {stream}.")
-
- def delete_contact_lists(self, records=[], count=10):
- """
- https://legacydocs.hubspot.com/docs/methods/lists/delete_list
- """
- if not records:
- records = self.get_contact_lists()
-
- record_ids_to_delete = [record['listId'] for record in records]
- if len(record_ids_to_delete) == 1 or \
- len(record_ids_to_delete) <= count:
- raise RuntimeError(
- "delete count is greater than or equal to the number of existing records for contact_lists, "
- "need to have at least one record remaining"
- )
- for record_id in record_ids_to_delete[:count]:
- url = f"{BASE_URL}/contacts/v1/lists/{record_id}"
-
- self.delete(url)
-
- def delete_deal_pipelines(self, records=[], count=10):
- """
- Delete older records based on timestamp primary key
- https://legacydocs.hubspot.com/docs/methods/pipelines/delete_pipeline
- """
- if not records:
- records = self.get_deal_pipelines()
-
- record_ids_to_delete = [record['pipelineId'] for record in records]
- if len(record_ids_to_delete) == 1 or \
- len(record_ids_to_delete) <= count:
- raise RuntimeError(
- "delete count is greater than or equal to the number of existing records for deal_pipelines, "
- "need to have at least one record remaining"
- )
- for record_id in record_ids_to_delete:
- if record_id == 'default' or len(
- record_id) > 16: # not a timestamp, not made by this client
- continue # skip
-
- url = f"{BASE_URL}/crm-pipelines/v1/pipelines/deals/{record_id}"
- self.delete(url)
-
- count -= 1
- if count == 0:
- return
-
- ##########################################################################
- ### OAUTH
- ##########################################################################
-
- def acquire_access_token_from_refresh_token(self):
- """
- NB: This will need to be updated if authorization is ever updated in the tap.
We - attempted to import this from the tap to lessen the maintenance burden, but we - hit issues with the relative import. - """ - payload = { - "grant_type": "refresh_token", - "redirect_uri": self.CONFIG['redirect_uri'], - "refresh_token": self.CONFIG['refresh_token'], - "client_id": self.CONFIG['client_id'], - "client_secret": self.CONFIG['client_secret'], - } - - response = requests.post(BASE_URL + "/oauth/v1/token", data=payload) - response.raise_for_status() - auth = response.json() - self.CONFIG['access_token'] = auth['access_token'] - self.CONFIG['refresh_token'] = auth['refresh_token'] - self.CONFIG['token_expires'] = ( - datetime.datetime.utcnow() + - datetime.timedelta(seconds=auth['expires_in'] - 600)) - self.HEADERS = {'Authorization': f"Bearer {self.CONFIG['access_token']}"} - LOGGER.info(f"TEST CLIENT | Token refreshed. Expires at {self.CONFIG['token_expires']}") - - def __init__(self, start_date=''): - self.BaseTest = HubspotBaseTest() - self.replication_keys = self.BaseTest.expected_replication_keys() - self.CONFIG = self.BaseTest.get_credentials() - self.CONFIG.update(self.BaseTest.get_properties()) - - self.start_date_strf = start_date if start_date else self.CONFIG['start_date'] - self.start_date = datetime.datetime.strptime( - self.start_date_strf, self.BaseTest.START_DATE_FORMAT - ).timestamp() * 1000 - - self.acquire_access_token_from_refresh_token() - - contact_lists_records = self.get_contact_lists(since='all') - deal_pipelines_records = self.get_deal_pipelines() - stream_limitations = {'deal_pipelines': [100, deal_pipelines_records], - 'contact_lists': [1500, contact_lists_records]} - - for stream, limits in stream_limitations.items(): - max_record_count, records = limits - pipeline_count = len(records) - if (max_record_count - pipeline_count) / max_record_count <= 0.1: # at/above 90% of record limit - delete_count = int(max_record_count / 2) - self.cleanup(stream, records, delete_count) - LOGGER.info(f"TEST CLIENT | {delete_count} records deleted from {stream}") diff --git a/archive/tests/client_tester.py b/archive/tests/client_tester.py deleted file mode 100644 index a03db6d..0000000 --- a/archive/tests/client_tester.py +++ /dev/null @@ -1,280 +0,0 @@ -import json -import time -from client import TestClient -from base import HubspotBaseTest - -class TestHubspotTestClient(HubspotBaseTest): - """ - Test the basic functionality of our Test Client. This is a tool for sanity checks, nothing more. - - To check an individual crud method, uncomment the corresponding test case below, and execute this file - as if it is a normal tap-tester test via bin/run-test. - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.test_client = TestClient(self.get_properties()['start_date']) - - ########################################################################## - ### TESTING CREATES - ########################################################################## - - # def test_contacts_create(self): - # # Testing contacts Post - # old_records = self.test_client.get_contacts() - # our_record = self.test_client.create_contacts() - # new_records = self.test_client.get_contacts() - # assert len(old_records) < len(new_records), \ - # f"Before contacts post found {len(old_records)} records. 
After post found {len(new_records)} records" - - # def test_contacts_create_stability(self): - # old_records = self.test_client.get_contacts() - # our_record = self.test_client.create_contacts() - # responses = [] - # for i in range(10): - # new_records = self.test_client.get_contacts() - # responses.append(new_records) - # time.sleep(1) - # all_versions = [record['versionTimestamp'] for response in responses - # for record in response if record['vid'] == our_record[0]['vid']] - # from pprint import pprint as pp - # pp(all_versions) - - # def test_companies_create(self): - # # Testing companies Post - - # old_records = self.test_client.get_companies('2021-08-25T00:00:00.000000Z') - # our_record = self.test_client.create_companies() - # now = time.time() - # time.sleep(6) - - # new_records = self.test_client.get_companies('2021-08-25T00:00:00.000000Z') - # time_for_get = time.time()-now - # print(time_for_get) - - # assert len(old_records) < len(new_records), \ - # f"Before companies post found {len(old_records)} records. After post found {len(new_records)} records" - - # def test_contact_lists_create(self): - # # Testing contact_lists POST - - # old_records = self.test_client.get_contact_lists() - # our_record = self.test_client.create_contact_lists() - # new_records = self.test_client.get_contact_lists() - - # assert len(old_records) < len(new_records), \ - # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" - - - # def test_contacts_by_company_create(self): - # # Testing contacts_by_company PUT - - - # old_contact_records = self.test_client.get_contacts() - # old_company_records = self.test_client.get_companies('2021-08-25T00:00:00.000000Z') - # old_records = self.test_client.get_contacts_by_company([old_company_records[0]["companyId"]]) - # our_record = self.test_client.create_contacts_by_company() - # new_records = self.test_client.get_contacts_by_company([old_company_records[0]["companyId"]]) - # assert len(old_records) < len(new_records), \ - # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" - - - # def test_deal_pipelines_create(self): - # # Testing deal_pipelines POST - - # old_records = self.test_client.get_deal_pipelines() - # our_record = self.test_client.create_deal_pipelines() - # new_records = self.test_client.get_deal_pipelines() - # assert len(old_records) < len(new_records), \ - # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" - - # def test_deal_pipelines_deletes(self): - # # Testing deal_pipelines DELETE - # import ipdb; ipdb.set_trace() - # 1+1 - # our_record = self.test_client.create_deal_pipelines() - # old_records = self.test_client.get_deal_pipelines() - # delete_records = self.test_client.delete_deal_pipelines(1) - # new_records = self.test_client.get_deal_pipelines() - # assert len(old_records) > len(new_records), \ - # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" - - # def test_deals_create(self): - # # Testing deals POST - - # old_records = self.test_client.get_deals() - # our_record = self.test_client.create_deals() - # new_records = self.test_client.get_deals() - # assert len(old_records) < len(new_records), \ - # f"Before post found {len(old_records)} records. 
After post found {len(new_records)} records" - - - # def test_subscription_changes_and_email_events_create(self): - # # Testing subscription_changes and email_events POST - - # old_emails = self.test_client.get_email_events() - # old_subs = self.test_client.get_subscription_changes() - # our_record = self.test_client.create_subscription_changes() - # time.sleep(10) - # new_subs = self.test_client.get_subscription_changes() - # new_emails = self.test_client.get_email_events() - - # assert len(old_subs) < len(new_subs), \ - # f"Before post found {len(old_subs)} subs. After post found {len(new_subs)} subs" - # assert len(old_emails) < len(new_emails), \ - # f"Before post found {len(old_emails)} emails. After post found {len(new_emails)} emails" - # print(f"Before {len(old_subs)} subs. After found {len(new_subs)} subs") - # print(f"Before {len(old_emails)} emails. After found {len(new_emails)} emails") - - # def test_engagements_create(self): - # # Testing create_engagements POST - - # old_records = self.test_client.get_engagements() - # our_record = self.test_client.create_engagements() - # new_records = self.test_client.get_engagements() - # assert len(old_records) < len(new_records), \ - # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" - - - # def test_forms_create(self): - # # Testing create_forms POST - # old_records = self.test_client.get_forms() - # our_record = self.test_client.create_forms() - # new_records = self.test_client.get_forms() - # assert len(old_records) < len(new_records), \ - # f"Before post found {len(old_records)} records. After post found {len(new_records)} records" - - - # def test_workflows_create(self): - # # Testing create_workflows POST - - # old_records = self.test_client.get_workflows() - # our_record = self.test_client.create_workflows() - # new_records = self.test_client.get_workflows() - # assert len(old_records) < len(new_records), \ - # f"Before post found {len(old_records)} records. 
After post found {len(new_records)} records" - - - ########################################################################## - ### TESTING UPDATES - ########################################################################## - - - # def test_workflows_update(self): # TODO This failed to change the record - # # Testing update_workflows POST - - # # grab a contact's email to use as the update - # contacts = self.test_client.get_contacts() - # for contact in contacts: - # if contact['properties'].get('email'): - # contact_email = contact['properties']['email']['value'] - # break - - # # old - # workflow = self.test_client.create('workflows') - # workflow_id = workflow[0]['id'] - # old_record = self.test_client._get_workflows_by_pk(workflow_id=workflow_id) - - - # # do the update - # our_record = self.test_client.update_workflows(workflow_id=workflow_id, contact_email=contact_email) - - # # new - # new_record = self.test_client._get_workflows_by_pk(workflow_id=workflow_id) - - # self.assertNotEqual(old_record, new_record) - - # def test_contacts_update(self): - # new_record = self.test_client.create_contacts() - # record_vid = new_record[0]['vid'] - # old_email = new_record[0]['properties']['email']['value'] - - # updated_record = self.test_client.update_contacts(record_vid) - - # self.assertNotEqual(updated_record[0]['properties']['email']['value'], old_email) - - # def test_campaigns_update(self): TODO - # """No endpoint found.""" - # self.fail("test_campaigns_update not implmented") - - # def test_companies_update(self): - # initial_record = self.test_client.create_companies() - # time.sleep(6) - # record_id = initial_record[0]['companyId'] - # initial_value = initial_record[0]['properties']['description']['value'] - - # updated_record = self.test_client.update_companies(record_id) - # updated_value = updated_record['properties']['description']['value'] - - # self.assertNotEqual(initial_value, updated_value) - - # def test_contact_lists_update(self): - # initial_record = self.test_client.create_contact_lists() - - # record_id = initial_record[0]['listId'] - # initial_value = initial_record[0]['name'] - - # updated_record = self.test_client.update_contact_lists(record_id) - # updated_value = updated_record['name'] - - # self.assertNotEqual(initial_value, updated_value) - - # def test_deal_pipelines_update(self): - # initial_record = self.test_client.get_deal_pipelines() - - # record_id = initial_record[0]['pipelineId'] - # initial_value = initial_record[0]['label'] - - # updated_record = self.test_client.update_deal_pipelines(record_id) - # updated_value = updated_record['label'] - - # self.assertNotEqual(initial_value, updated_value) - - # def test_deals_update(self): - # initial_record = self.test_client.get_deals() - - # record_id = initial_record[0]['dealId'] - # initial_value = initial_record[0]['properties']['dealname']['value'] - - # updated_record = self.test_client.update_deals(record_id) - # updated_value = updated_record['properties']['dealname']['value'] - - # self.assertNotEqual(initial_value, updated_value) - - # def test_forms_update(self): - # initial_record = self.test_client.get_forms() - - # record_id = initial_record[0]['guid'] - # initial_value = initial_record[0]['name'] - - # updated_record = self.test_client.update_forms(record_id) - # updated_value = updated_record['name'] - - # self.assertNotEqual(initial_value, updated_value) - - # def test_owners_update(self): TODO - # """No endpoint found.""" - # self.fail("test_owners_update not implmented") - - # def 
test_engagements_update(self): - # initial_record = self.test_client.get_engagements() - - # record_id = initial_record[0]['engagement_id'] - # initial_value = initial_record[0]['metadata'] - - # updated_record = self.test_client.update_engagements(record_id) - # updated_value = updated_record['metadata'] - - # self.assertNotEqual(initial_value, updated_value) - - ########################################################################## - ### TODO updates - ########################################################################## - # def test_contacts_by_company_update(self): - # pass - - # def test_email_events_update(self): - # pass - - - # def test_subscription_changes_update(self): - # pass diff --git a/archive/tests/test_hubspot_all_fields.py b/archive/tests/test_hubspot_all_fields.py deleted file mode 100644 index 2693fa7..0000000 --- a/archive/tests/test_hubspot_all_fields.py +++ /dev/null @@ -1,327 +0,0 @@ -import datetime - -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner -from tap_tester import LOGGER - -from base import HubspotBaseTest -from client import TestClient - -def get_matching_actual_record_by_pk(expected_primary_key_dict, actual_records): - ret_records = [] - can_save = True - for record in actual_records: - for key, value in expected_primary_key_dict.items(): - actual_value = record[key] - if actual_value != value: - can_save = False - break - if can_save: - ret_records.append(record) - can_save = True - return ret_records - -FIELDS_ADDED_BY_TAP = { - # In 'contacts' streams 'versionTimeStamp' is not available in response of the second call. - # In the 1st call, Tap retrieves records of all contacts and from those records, it collects vids(id of contact). - # These records contain the versionTimestamp field. - # In the 2nd call, vids collected from the 1st call will be used to retrieve the whole contact record. - # Here, the records collected for detailed contact information do not contain the versionTimestamp field. - # So, we add the versionTimestamp field(fetched from 1st call records) explicitly in the record of 2nd call. 
- "contacts": { "versionTimestamp" } -} - -KNOWN_EXTRA_FIELDS = { - 'deals': { - # BUG_TDL-14993 | https://jira.talendforge.org/browse/TDL-14993 - # Has an value of object with key 'value' and value 'Null' - 'property_hs_date_entered_1258834', - }, -} - -KNOWN_MISSING_FIELDS = { - 'contacts':{ # BUG https://jira.talendforge.org/browse/TDL-16016 - 'property_hs_latest_source_data_2', - 'property_hs_latest_source', - 'property_hs_latest_source_data_1', - 'property_hs_timezone', - 'property_hs_latest_source_timestamp', - }, - 'contact_lists': { # BUG https://jira.talendforge.org/browse/TDL-14996 - 'authorId', - 'teamIds', - 'internal', - 'ilsFilterBranch', - 'limitExempt', - }, - 'email_events': { # BUG https://jira.talendforge.org/browse/TDL-14997 - 'portalSubscriptionStatus', - 'attempt', - 'source', - 'subscriptions', - 'sourceId', - 'replyTo', - 'suppressedMessage', - 'bcc', - 'suppressedReason', - 'cc', - }, - 'engagements': { # BUG https://jira.talendforge.org/browse/TDL-14997 - 'scheduledTasks', - }, - 'workflows': { # BUG https://jira.talendforge.org/browse/TDL-14998 - 'migrationStatus', - 'updateSource', - 'description', - 'originalAuthorUserId', - 'lastUpdatedByUserId', - 'creationSource', - 'portalId', - 'contactCounts', - }, - 'owners': { # BUG https://jira.talendforge.org/browse/TDL-15000 - 'activeSalesforceId' - }, - 'forms': { # BUG https://jira.talendforge.org/browse/TDL-15001 - 'alwaysCreateNewCompany', - 'themeColor', - 'publishAt', - 'editVersion', - 'embedVersion', - 'themeName', - 'style', - 'thankYouMessageJson', - 'createMarketableContact', - 'kickbackEmailWorkflowId', - 'businessUnitId', - 'portableKey', - 'parentId', - 'kickbackEmailsJson', - 'unpublishAt', - 'internalUpdatedAt', - 'multivariateTest', - 'publishedAt', - 'customUid', - 'isPublished', - 'paymentSessionTemplateIds', - 'selectedExternalOptions', - }, - 'companies': { # BUG https://jira.talendforge.org/browse/TDL-15003 - 'mergeAudits', - 'stateChanges', - 'isDeleted', - 'additionalDomains', - 'property_hs_analytics_latest_source', - 'property_hs_analytics_latest_source_data_2', - 'property_hs_analytics_latest_source_data_1', - 'property_hs_analytics_latest_source_timestamp', - }, - 'campaigns': { # BUG https://jira.talendforge.org/browse/TDL-15003 - 'lastProcessingStateChangeAt', - 'lastProcessingFinishedAt', - 'processingState', - 'lastProcessingStartedAt', - }, - 'deals': { # BUG https://jira.talendforge.org/browse/TDL-14999 - 'imports', - 'property_hs_num_associated_deal_splits', - 'property_hs_is_deal_split', - 'stateChanges', - 'property_hs_num_associated_active_deal_registrations', - 'property_hs_num_associated_deal_registrations', - 'property_hs_analytics_latest_source', - 'property_hs_analytics_latest_source_timestamp_contact', - 'property_hs_analytics_latest_source_data_1_contact', - 'property_hs_analytics_latest_source_timestamp', - 'property_hs_analytics_latest_source_data_1', - 'property_hs_analytics_latest_source_contact', - 'property_hs_analytics_latest_source_company', - 'property_hs_analytics_latest_source_data_1_company', - 'property_hs_analytics_latest_source_data_2_company', - 'property_hs_analytics_latest_source_data_2', - 'property_hs_analytics_latest_source_data_2_contact', - }, - 'subscription_changes':{ - 'normalizedEmailId' - } -} - - -class TestHubspotAllFields(HubspotBaseTest): - """Test that with all fields selected for a stream we replicate data as expected""" - @staticmethod - def name(): - return "tt_hubspot_all_fields_dynamic" - - def streams_under_test(self): - """expected 
streams minus the streams not under test""" - return self.expected_streams().difference({ - 'owners', - 'subscription_changes', # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 - }) - - def setUp(self): - self.maxDiff = None # see all output in failure - - test_client = TestClient(start_date=self.get_properties()['start_date']) - self.expected_records = dict() - streams = self.streams_under_test() - stream_to_run_last = 'contacts_by_company' - if stream_to_run_last in streams: - streams.remove(stream_to_run_last) - streams = list(streams) - streams.append(stream_to_run_last) - - for stream in streams: - # Get all records - if stream == 'contacts_by_company': - company_ids = [company['companyId'] for company in self.expected_records['companies']] - self.expected_records[stream] = test_client.read(stream, parent_ids=company_ids) - else: - self.expected_records[stream] = test_client.read(stream) - - for stream, records in self.expected_records.items(): - LOGGER.info("The test client found %s %s records.", len(records), stream) - - - self.convert_datatype(self.expected_records) - - def convert_datatype(self, expected_records): - for stream, records in expected_records.items(): - for record in records: - - # convert timestamps to string formatted datetime - timestamp_keys = {'timestamp'} - for key in timestamp_keys: - timestamp = record.get(key) - if timestamp: - unformatted = datetime.datetime.fromtimestamp(timestamp/1000) - formatted = datetime.datetime.strftime(unformatted, self.BASIC_DATE_FORMAT) - record[key] = formatted - - return expected_records - - def test_run(self): - conn_id = connections.ensure_connection(self) - - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - expected_streams = self.streams_under_test() - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - stream_schema - ) - - # Run sync - first_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records = runner.get_records_from_target_output() - - # Test by Stream - for stream in expected_streams: - with self.subTest(stream=stream): - - # gather expected values - replication_method = self.expected_replication_method()[stream] - primary_keys = sorted(self.expected_primary_keys()[stream]) - - # gather replicated records - actual_records = [message['data'] - for message in synced_records[stream]['messages'] - if message['action'] == 'upsert'] - - for expected_record in self.expected_records[stream]: - - primary_key_dict = {primary_key: expected_record[primary_key] for primary_key in primary_keys} - primary_key_values = list(primary_key_dict.values()) - - with self.subTest(expected_record=primary_key_dict): - # grab the replicated record that corresponds to expected_record by checking primary keys - matching_actual_records_by_pk = get_matching_actual_record_by_pk(primary_key_dict, actual_records) - if not matching_actual_records_by_pk: - LOGGER.warn("Expected %s record was not replicated: %s", - stream, primary_key_dict) - continue # skip this expected record if it isn't replicated - actual_record = matching_actual_records_by_pk[0] - - expected_keys = set(expected_record.keys()).union(FIELDS_ADDED_BY_TAP.get(stream, {})) - actual_keys = set(actual_record.keys()) - - # NB: KNOWN_MISSING_FIELDS is a 
dictionary of streams to aggregated missing fields. - # We will check each expected_record to see which of the known keys is present in expectations - # and then will add them to the known_missing_keys set. - known_missing_keys = set() - for missing_key in KNOWN_MISSING_FIELDS.get(stream, set()): - if missing_key in expected_record.keys(): - known_missing_keys.add(missing_key) - del expected_record[missing_key] - - # NB: KNOWN_EXTRA_FIELDS is a dictionary of streams to fields that should not - # be replicated but are. See the variable declaration at top of file for linked BUGs. - known_extra_keys = set() - for extra_key in KNOWN_EXTRA_FIELDS.get(stream, set()): - known_extra_keys.add(extra_key) - - # Verify the fields in our expected record match the fields in the corresponding replicated record - expected_keys_adjusted = expected_keys.union(known_extra_keys) - actual_keys_adjusted = actual_keys.union(known_missing_keys) - - # NB: The following workaround is for dynamic fields on the `deals` stream that we just can't track. - # At the time of implementation there is no customer feedback indicating that these dynamic fields - # would prove useful to an end user. The ones that we replicated with the test client are specific - # to our test data. We have determined that the filtering of these fields is an expected behavior. - - # deals workaround for 'property_hs_date_entered_' fields - bad_key_prefixes = {'property_hs_date_entered_', 'property_hs_date_exited_'} - bad_keys = set() - for key in expected_keys_adjusted: - for prefix in bad_key_prefixes: - if key.startswith(prefix) and key not in actual_keys_adjusted: - bad_keys.add(key) - for key in actual_keys_adjusted: - for prefix in bad_key_prefixes: - if key.startswith(prefix) and key not in expected_keys_adjusted: - bad_keys.add(key) - for key in bad_keys: - if key in expected_keys_adjusted: - expected_keys_adjusted.remove(key) - elif key in actual_keys_adjusted: - actual_keys_adjusted.remove(key) - - self.assertSetEqual(expected_keys_adjusted, actual_keys_adjusted) - - # Future Testing | TDL-16145 - # self.assertDictEqual(expected_record, actual_record) - - # Log a warning if the tap replicated more records than expected - expected_primary_key_values = {tuple([record[primary_key] - for primary_key in primary_keys]) - for record in self.expected_records[stream]} - actual_records_primary_key_values = {tuple([record[primary_key] - for primary_key in primary_keys]) - for record in actual_records} - if expected_primary_key_values.issubset(actual_records_primary_key_values): - LOGGER.warn("Unexpected %s records replicated: %s", - stream, - actual_records_primary_key_values - expected_primary_key_values) - - -class TestHubspotAllFieldsStatic(TestHubspotAllFields): - @staticmethod - def name(): - return "tt_hubspot_all_fields_static" - - def streams_under_test(self): - """expected streams minus the streams not under test""" - return { - 'owners', - # 'subscription_changes', # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 - } - - def get_properties(self): - return {'start_date' : '2021-05-02T00:00:00Z'} diff --git a/archive/tests/test_hubspot_automatic_fields.py b/archive/tests/test_hubspot_automatic_fields.py deleted file mode 100644 index 693f5ff..0000000 --- a/archive/tests/test_hubspot_automatic_fields.py +++ /dev/null @@ -1,109 +0,0 @@ -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner -import re - -from base import HubspotBaseTest - 
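The helper `get_matching_actual_record_by_pk` used in the all-fields test above is defined elsewhere in the suite; a minimal sketch consistent with its call sites (the body below is inferred from usage, not taken from the archived source):

def get_matching_actual_record_by_pk(expected_primary_key_dict, actual_records):
    """Return every replicated record whose primary-key values match the expected ones."""
    return [
        record for record in actual_records
        if all(record.get(pk) == value for pk, value in expected_primary_key_dict.items())
    ]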
-STATIC_DATA_STREAMS = {'owners'} - -class TestHubspotAutomaticFields(HubspotBaseTest): - @staticmethod - def name(): - return "tt_hubspot_automatic" - - def streams_to_test(self): - """streams to test""" - return self.expected_streams() - STATIC_DATA_STREAMS - - def test_run(self): - """ - Verify we can deselect all fields except when inclusion=automatic, which is handled by base.py methods - Verify that only the automatic fields are sent to the target. - """ - conn_id = connections.ensure_connection(self) - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - expected_streams = self.streams_to_test() - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - self.select_all_streams_and_fields(conn_id, catalog_entries, select_all_fields=False) - - # Include the following step in this test if/when hubspot conforms to the standards of metadata - # See bugs BUG_TDL-9939 and BUG_TDL-14938 - - # # Verify our selection resulted in no fields selected except for those with inclusion of 'automatic' - # catalogs_selection = menagerie.get_catalogs(conn_id) - # for cat in catalogs_selection: - # with self.subTest(cat=cat): - # catalog_entry = menagerie.get_annotated_schema(conn_id, cat['stream_id']) - - # # Verify the expected stream tables are selected - # selected = catalog_entry.get('annotated-schema').get('selected') - # print("Validating selection on {}: {}".format(cat['stream_name'], selected)) - # if cat['stream_name'] not in expected_streams: - # self.assertFalse(selected, msg="Stream selected, but not testable.") - # continue # Skip remaining assertions if we aren't selecting this stream - # self.assertTrue(selected, msg="Stream not selected.") - - # # Verify only automatic fields are selected - # expected_automatic_fields = self.expected_automatic_fields().get(cat['tap_stream_id']) - # selected_fields = self.get_selected_fields_from_metadata(catalog_entry['metadata']) - - # # remove replication keys - # self.assertEqual(expected_automatic_fields, selected_fields) - - # Run a sync job using orchestrator - sync_record_count = self.run_and_verify_sync(conn_id) - synced_records = runner.get_records_from_target_output() - - # Assert the records for each stream - for stream in expected_streams: - with self.subTest(stream=stream): - - # Verify that data is present - record_count = sync_record_count.get(stream, 0) - self.assertGreater(record_count, 0) - - data = synced_records.get(stream) - record_messages_keys = [set(row['data'].keys()) for row in data['messages']] - expected_keys = self.expected_automatic_fields().get(stream) - - # BUG_TDL-9939 https://jira.talendforge.org/browse/TDL-9939 Replication keys are not included as an automatic field for these streams - if stream in {'subscription_changes', 'email_events'}: - # replication keys not in the expected_keys - remove_keys = self.expected_metadata()[stream].get(self.REPLICATION_KEYS) - expected_keys = expected_keys.difference(remove_keys) - elif stream in {'engagements'}: - # engagements has a nested object 'engagement' with the automatic fields - expected_keys = expected_keys.union({'engagement'}) - # Verify that only the automatic fields are sent to the target - for actual_keys in record_messages_keys: - self.assertSetEqual(actual_keys, expected_keys, - msg=f"Expected automatic fields: {expected_keys} and nothing else." 
- ) - - - # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 - # The subscription_changes stream does not have a valid pk to ensure no dupes are sent - if stream != 'subscription_changes': - - # make sure there are no duplicate records by using the pks - pk = self.expected_primary_keys()[stream] - pks_values = [tuple([message['data'][p] for p in pk]) for message in data['messages']] - self.assertEqual(len(pks_values), len(set(pks_values))) - - -class TestHubspotAutomaticFieldsStaticData(TestHubspotAutomaticFields): - def streams_to_test(self): - """streams to test""" - return STATIC_DATA_STREAMS - - @staticmethod - def name(): - return "tt_hubspot_automatic_static" - - def get_properties(self): - return { - 'start_date' : '2021-08-19T00:00:00Z', - } diff --git a/archive/tests/test_hubspot_bookmarks.py b/archive/tests/test_hubspot_bookmarks.py deleted file mode 100644 index fa8a11f..0000000 --- a/archive/tests/test_hubspot_bookmarks.py +++ /dev/null @@ -1,248 +0,0 @@ -from datetime import datetime, timedelta -from time import sleep - - -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner - -from base import HubspotBaseTest -from client import TestClient - - -STREAMS_WITHOUT_UPDATES = {'email_events', 'contacts_by_company', 'workflows'} -STREAMS_WITHOUT_CREATES = {'campaigns', 'owners'} - - -class TestHubspotBookmarks(HubspotBaseTest): - """Ensure tap replicates new and updated records based on the replication method of a given stream. - - Create records for each stream. Run check mode, perform table and field selection, and run a sync. - Create 1 record for each stream and update 1 record for each stream prior to running a 2nd sync. - - Verify for each incremental stream you can do a sync which records bookmarks, and that the format matches expectations. - - Verify that a bookmark doesn't exist for full table streams. - - Verify the bookmark is the max value sent to the target for a given replication key. - - Verify 2nd sync respects the bookmark. 
- """ - @staticmethod - def name(): - return "tt_hubspot_bookmarks" - - def streams_to_test(self): - """expected streams minus the streams not under test""" - - expected_streams = self.expected_streams().difference(STREAMS_WITHOUT_CREATES) - - return expected_streams.difference({ - 'subscription_changes', # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 - }) - - def get_properties(self): - return { - 'start_date' : datetime.strftime(datetime.today()-timedelta(days=3), self.START_DATE_FORMAT), - } - - def setUp(self): - self.maxDiff = None # see all output in failure - - self.test_client = TestClient(self.get_properties()['start_date']) - - def create_test_data(self, expected_streams): - - self.expected_records = {stream: [] - for stream in expected_streams} - - for stream in expected_streams - {'contacts_by_company'}: - if stream == 'email_events': - email_records = self.test_client.create(stream, times=3) - self.expected_records['email_events'] += email_records - else: - # create records, one will be updated between syncs - for _ in range(3): - record = self.test_client.create(stream) - self.expected_records[stream] += record - - if 'contacts_by_company' in expected_streams: # do last - company_ids = [record['companyId'] for record in self.expected_records['companies']] - contact_records = self.expected_records['contacts'] - for i in range(3): - record = self.test_client.create_contacts_by_company( - company_ids=company_ids, contact_records=contact_records - ) - self.expected_records['contacts_by_company'] += record - - def test_run(self): - expected_streams = self.streams_to_test() - - # generate 3 records for every stream that has a create endpoint - create_streams = expected_streams - STREAMS_WITHOUT_CREATES - self.create_test_data(create_streams) - - conn_id = connections.ensure_connection(self) - - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - stream_schema - ) - - # Run sync 1 - first_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records = runner.get_records_from_target_output() - state_1 = menagerie.get_state(conn_id) - - # Create 1 record for each stream between syncs - for stream in expected_streams - {'contacts_by_company'}: - record = self.test_client.create(stream) - self.expected_records[stream] += record - if 'contacts_by_company' in expected_streams: - company_ids = [record['companyId'] for record in self.expected_records['companies'][:-1]] - contact_records = self.expected_records['contacts'][-1:] - record = self.test_client.create_contacts_by_company( - company_ids=company_ids, contact_records=contact_records - ) - self.expected_records['contacts_by_company'] += record - - - # Update 1 record from the test seutp for each stream that has an update endpoint - for stream in expected_streams - STREAMS_WITHOUT_UPDATES: - primary_key = list(self.expected_primary_keys()[stream])[0] - record_id = self.expected_records[stream][0][primary_key] - record = self.test_client.update(stream, record_id) - self.expected_records[stream].append(record) - - #run second sync - second_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records_2 = runner.get_records_from_target_output() - 
state_2 = menagerie.get_state(conn_id) - - # Test by Stream - for stream in expected_streams: - - with self.subTest(stream=stream): - - # gather expected values - replication_method = self.expected_replication_method()[stream] - primary_keys = self.expected_primary_keys()[stream] - - # setting expected records for sync 1 based on the unsorted list of records - # which does not include the record created between syncs 1 and 2 - expected_records_1 = self.expected_records[stream][:3] - - # gather replicated records - actual_record_count_2 = second_record_count_by_stream[stream] - actual_records_2 = [message['data'] - for message in synced_records_2[stream]['messages'] - if message['action'] == 'upsert'] - actual_record_count_1 = first_record_count_by_stream[stream] - actual_records_1 = [message['data'] - for message in synced_records[stream]['messages'] - if message['action'] == 'upsert'] - - if self.is_child(stream): # we will set expectations for child streams based on the parent - - parent_stream = self.expected_metadata()[stream][self.PARENT_STREAM] - parent_replication_method = self.expected_replication_method()[parent_stream] - - if parent_replication_method == self.INCREMENTAL: - - expected_record_count = 1 if stream not in STREAMS_WITHOUT_UPDATES else 2 - expected_records_2 = self.expected_records[stream][-expected_record_count:] - - # verify the record count matches our expectations for child streams with incremental parents - self.assertGreater(actual_record_count_1, actual_record_count_2) - - elif parent_replication_method == self.FULL: - - # verify the record count matches our expectations for child streams with full table parents - expected_records_2 = self.expected_records[stream] - self.assertEqual(actual_record_count_1 + 1, actual_record_count_2) - - else: - raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") - - - elif replication_method == self.INCREMENTAL: - - # NB: FOR INCREMENTAL STREAMS the tap does not replicate the replication-key for any records. - # It does functionally replicate as a standard incremental sync would but does not order - # records by replication-key value (since it does not exist on the record). To get around - # this we are putting the replication-keys on our expected records via test_client. We will - # verify the records we expect (via primary-key) are replicated prior to checking the - # replication-key values. - - # get saved states - stream_replication_key = list(self.expected_replication_keys()[stream])[0] - bookmark_1 = state_1['bookmarks'][stream][stream_replication_key] - bookmark_2 = state_2['bookmarks'][stream][stream_replication_key] - - # setting expected records knowing they are ordered by replication-key value - expected_record_count = 1 if stream not in STREAMS_WITHOUT_UPDATES else 2 - expected_records_2 = self.expected_records[stream][-expected_record_count:] - - # The streams excluded below do not contain a proper replication-key value in the response. 
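For reference, the state objects read above follow the standard Singer shape; a hedged illustration with invented values (stream and key names taken from elsewhere in this suite):

example_state = {
    "currently_syncing": None,
    "bookmarks": {
        "tickets": {"updatedAt": "2023-05-01T00:00:00.000000Z"},
        "engagements": {"lastUpdated": "2023-05-01T00:00:00.000000Z"},
    },
}
bookmark = example_state["bookmarks"]["tickets"]["updatedAt"]  # ISO-8601 strings, so lexical comparison works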
- if stream not in {"companies","deals","contacts_by_company","email_events"}: - # verify first sync bookmark value is max bookmark value - max_bk_value = actual_records_1[0].get(stream_replication_key) - for record in actual_records_1: - replication_key_value = record.get(stream_replication_key) - if max_bk_value < replication_key_value: - max_bk_value = replication_key_value - - # For a few streams, test records updated before the sync may have a replication value - # greater than the bookmark value, probably due to delayed record-update pickup by HubSpot - self.assertLessEqual(bookmark_1, max_bk_value, - msg="First sync bookmark value cannot be greater than max replication-key value") - - # verify second sync bookmark value is max bookmark value - max_bk_value = actual_records_2[0].get(stream_replication_key) - for record in actual_records_2: - replication_key_value = record.get(stream_replication_key) - if max_bk_value < replication_key_value: - max_bk_value = replication_key_value - - # For a few streams, test records updated before the sync may have a replication value - # greater than the bookmark value, probably due to delayed record-update pickup by HubSpot - self.assertLessEqual(bookmark_2, max_bk_value, - msg="Second sync bookmark value cannot be greater than max replication-key value") - - # verify only the new and updated records are captured by checking record counts - self.assertGreater(actual_record_count_1, actual_record_count_2) - - # verify the state was updated with incremented bookmark - if stream != 'email_events': # BUG TDL-15706 - self.assertGreater(bookmark_2, bookmark_1) - - elif replication_method == self.FULL: - expected_records_2 = self.expected_records[stream] - self.assertEqual(actual_record_count_1 + 1, actual_record_count_2) - - else: - raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") - - # verify by primary key that all expected records are replicated in sync 1 - sync_1_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_1] - expected_sync_1_pks = [tuple([record[pk] for pk in primary_keys]) - for record in expected_records_1] - for expected_pk in expected_sync_1_pks: - self.assertIn(expected_pk, sync_1_pks) - - # verify by primary key that all expected records are replicated in sync 2 - sync_2_pks = sorted([tuple([record[pk] for pk in primary_keys]) for record in actual_records_2]) - expected_sync_2_pks = sorted([tuple([record[pk] for pk in primary_keys]) - for record in expected_records_2]) - for expected_pk in expected_sync_2_pks: - self.assertIn(expected_pk, sync_2_pks) - - # verify that at least 1 record from the first sync is replicated in the 2nd sync - # to prove that the bookmarking is inclusive - if stream in {'companies', # BUG | https://jira.talendforge.org/browse/TDL-15503 - 'email_events'}: # BUG | https://jira.talendforge.org/browse/TDL-15706 - continue # skipping failures - self.assertTrue(any([expected_pk in sync_2_pks for expected_pk in expected_sync_1_pks])) diff --git a/archive/tests/test_hubspot_bookmarks_static.py b/archive/tests/test_hubspot_bookmarks_static.py deleted file mode 100644 index bbbda3e..0000000 --- a/archive/tests/test_hubspot_bookmarks_static.py +++ /dev/null @@ -1,127 +0,0 @@ -from datetime import datetime, timedelta -from time import sleep -import copy - -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner - -from base import HubspotBaseTest -from client import TestClient - - -STREAMS_WITHOUT_CREATES = 
{'campaigns', 'owners'} - - -class TestHubspotBookmarks(HubspotBaseTest): - """Test basic bookmarking and replication for streams that do not have CRUD capability.""" - @staticmethod - def name(): - return "tt_hubspot_bookmarks_static" - - def streams_to_test(self): - """expected streams minus the streams not under test""" - return STREAMS_WITHOUT_CREATES - - def get_properties(self): - # 'start_date' : '2021-08-19T00:00:00Z' - return {'start_date' : '2017-11-22T00:00:00Z'} - - def setUp(self): - self.maxDiff = None # see all output in failure - - - def test_run(self): - expected_streams = self.streams_to_test() - - conn_id = connections.ensure_connection(self) - - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - stream_schema - ) - - # Run sync 1 - first_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records = runner.get_records_from_target_output() - state_1 = menagerie.get_state(conn_id) - - # Update state to simulate a bookmark - new_state = copy.deepcopy(state_1) - for stream in state_1['bookmarks'].keys(): - if self.expected_replication_method()[stream] == self.INCREMENTAL: - calculated_bookmark_value = self.timedelta_formatted( - state_1['bookmarks'][stream]['updatedAt'], days=-1, str_format=self.BASIC_DATE_FORMAT - ) - new_state['bookmarks'][stream]['updatedAt'] = calculated_bookmark_value - - menagerie.set_state(conn_id, new_state) - - # run second sync - second_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records_2 = runner.get_records_from_target_output() - state_2 = menagerie.get_state(conn_id) - - # Test by Stream - for stream in expected_streams: - - with self.subTest(stream=stream): - - # gather expected values - replication_method = self.expected_replication_method()[stream] - primary_keys = self.expected_primary_keys()[stream] - - # gather replicated records - actual_record_count_2 = second_record_count_by_stream[stream] - actual_records_2 = [message['data'] - for message in synced_records_2[stream]['messages'] - if message['action'] == 'upsert'] - actual_record_count_1 = first_record_count_by_stream[stream] - actual_records_1 = [message['data'] - for message in synced_records[stream]['messages'] - if message['action'] == 'upsert'] - - # NB: There are no replication-key values on records and so we cannot confirm that the replicated - # records respect the bookmark via direct comparison. All we can do is verify syncs correspond - # to the replication methods logically by strategically setting the simulated state. 
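The `timedelta_formatted` helper called above lives in base.py, which is not part of this patch; a minimal sketch of the behavior its call sites imply, with the date format string assumed:

from datetime import datetime, timedelta

def timedelta_formatted(value: str, days: int, str_format: str) -> str:
    # Shift a formatted timestamp string by the given number of days, keeping the format.
    shifted = datetime.strptime(value, str_format) + timedelta(days=days)
    return datetime.strftime(shifted, str_format)

# e.g. timedelta_formatted('2021-05-02T00:00:00.000000Z', days=-1, str_format='%Y-%m-%dT%H:%M:%S.%fZ')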
- - if replication_method == self.INCREMENTAL: - - # get saved states - stream_replication_key = list(self.expected_replication_keys()[stream])[0] - bookmark_1 = state_1['bookmarks'][stream][stream_replication_key] - bookmark_2 = state_2['bookmarks'][stream][stream_replication_key] - - # verify the uninterrupted sync and the simulated sync end with the same bookmark values - self.assertEqual(bookmark_1, bookmark_2) - - # trim records down to just the primary key values - sync_1_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_1] - sync_2_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_2] - # ensure no dupe records present - self.assertCountEqual(set(sync_1_pks), sync_1_pks) - self.assertCountEqual(set(sync_2_pks), sync_2_pks) - - # verify the records from sync 2 are a subset of the records from sync 1, since the simulated - # state does not correspond to a specific record's replication-key value - self.assertTrue(set(sync_2_pks).issubset(set(sync_1_pks))) - - # verify there are more records in sync 1 than in sync 2 (proper setup required for this) - self.assertGreater(actual_record_count_1, actual_record_count_2) - - elif replication_method == self.FULL: - - # verify the same number of records were replicated in each sync - self.assertEqual(actual_record_count_1, actual_record_count_2) - - # verify the exact same records were replicated in each sync - self.assertEqual(actual_records_1, actual_records_2) - - else: - raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") diff --git a/archive/tests/test_hubspot_child_stream_only.py b/archive/tests/test_hubspot_child_stream_only.py deleted file mode 100644 index a1ffc42..0000000 --- a/archive/tests/test_hubspot_child_stream_only.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Test tap field selection of a child stream without its parent.""" -import re -from datetime import datetime as dt -from datetime import timedelta - -from tap_tester import connections -from tap_tester import menagerie -from tap_tester import runner - -from base import HubspotBaseTest -from client import TestClient - - -class FieldSelectionChildTest(HubspotBaseTest): - """Test tap field selection of a child stream without its parent.""" - - @staticmethod - def name(): - return "tt_hubspot_child_streams" - - def get_properties(self): - return { - 'start_date' : dt.strftime(dt.today()-timedelta(days=2), self.START_DATE_FORMAT) - } - - def setUp(self): - test_client = TestClient(start_date=self.get_properties()['start_date']) - - contact = test_client.create('contacts') - company = test_client.create('companies')[0] - contact_by_company = test_client.create_contacts_by_company( - company_ids=[company['companyId']], - contact_records=contact - ) - - def test_run(self): - """ - Verify that when a child stream is selected without its parent that - • a critical error in the tap occurs - • the error indicates which parent stream needs to be selected - • when the parent is selected the tap does not raise a critical error - """ - streams_to_test = {"contacts_by_company"} - - conn_id = self.create_connection_and_run_check() - - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in streams_to_test] - - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - 
stream_schema - ) - - # Run a sync job using orchestrator - sync_job_name = runner.run_sync_mode(self, conn_id) - - # Verify tap and target exit codes - exit_status = menagerie.get_exit_status(conn_id, sync_job_name) - - # Verify that the tap error message shows you need to select the parent stream - self.assertRaises(AssertionError, menagerie.verify_sync_exit_status, self, exit_status, sync_job_name) - self.assertEqual(exit_status['tap_error_message'], - ('Unable to extract contacts_by_company data. ' - 'To receive contacts_by_company data, you also need to select companies.')) - - # Verify there is no discovery or target error - self.assertEqual(exit_status['target_exit_status'], 0) - self.assertEqual(exit_status['discovery_exit_status'], 0) - - # Select only child and required parent and make sure there is no critical error - streams_to_test = {"contacts_by_company", "companies"} - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in streams_to_test] - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - stream_schema - ) - - # Run a sync job - self.run_and_verify_sync(conn_id) diff --git a/archive/tests/test_hubspot_discovery.py b/archive/tests/test_hubspot_discovery.py deleted file mode 100644 index c61ba04..0000000 --- a/archive/tests/test_hubspot_discovery.py +++ /dev/null @@ -1,131 +0,0 @@ -"""Test tap discovery mode and metadata/annotated-schema.""" -import re - -from tap_tester import menagerie - -from base import HubspotBaseTest - - -class DiscoveryTest(HubspotBaseTest): - """Test tap discovery mode and metadata/annotated-schema conforms to standards.""" - - @staticmethod - def name(): - return "tt_hubspot_discovery" - - def test_run(self): - """ - Verify that discover creates the appropriate catalog, schema, metadata, etc. - - • Verify the number of actual streams discovered matches the expected count - • Verify the stream names discovered were what we expect - • Verify stream names follow naming convention - streams should only have lowercase alphas and underscores - • verify there is only 1 top level breadcrumb - • verify replication key(s) - • verify primary key(s) - • verify that if there is a replication key we are doing INCREMENTAL otherwise FULL - • verify the actual replication matches our expected replication method - • verify that primary, replication and foreign keys - are given the inclusion of automatic (metadata and annotated schema). 
- • verify that all other fields have inclusion of available (metadata and schema) - """ - streams_to_test = self.expected_streams() - - conn_id = self.create_connection_and_run_check() - - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Verify stream names follow naming convention - # streams should only have lowercase alphas and underscores - found_catalog_names = {c['tap_stream_id'] for c in found_catalogs} - self.assertTrue(all([re.fullmatch(r"[a-z_]+", name) for name in found_catalog_names]), - msg="One or more streams don't follow standard naming") - - for stream in streams_to_test: - with self.subTest(stream=stream): - catalog = next(iter([catalog for catalog in found_catalogs - if catalog["stream_name"] == stream])) - assert catalog # based on previous tests this should always be found - schema_and_metadata = menagerie.get_annotated_schema(conn_id, catalog['stream_id']) - metadata = schema_and_metadata["metadata"] - - # verify there is only 1 top level breadcrumb - stream_properties = [item for item in metadata if item.get("breadcrumb") == []] - self.assertTrue(len(stream_properties) == 1, - msg=f"There is NOT only one top level breadcrumb for {stream}" + \ - f"\nstream_properties | {stream_properties}") - - # verify replication key(s) - actual_rep_keys = set(stream_properties[0].get( - "metadata", {self.REPLICATION_KEYS: None}).get( - self.REPLICATION_KEYS, [])) - self.assertEqual( - actual_rep_keys, - self.expected_replication_keys()[stream], - msg=f"expected replication key {self.expected_replication_keys()[stream]} but actual is {actual_rep_keys}" - ) - - - # verify primary key(s) - actual_primary_keys = set(stream_properties[0].get("metadata", {self.PRIMARY_KEYS: []}).get(self.PRIMARY_KEYS, [])) - self.assertSetEqual(self.expected_primary_keys()[stream], actual_primary_keys, - msg=f"expected primary key {self.expected_primary_keys()[stream]} but actual is {actual_primary_keys}" - ) - actual_replication_method = stream_properties[0]['metadata'].get('forced-replication-method') - # BUG https://jira.talendforge.org/browse/TDL-9939 all streams are set to full-table in the metadata - # verify the actual replication matches our expected replication method - if stream == "contacts": - self.assertEqual( - self.expected_replication_method().get(stream, None), - actual_replication_method, - msg="The actual replication method {} doesn't match the expected {}".format( - actual_replication_method, - self.expected_replication_method().get(stream, None))) - - # verify that if there is a replication key we are doing INCREMENTAL otherwise FULL - actual_replication_method = stream_properties[0].get( - "metadata", {self.REPLICATION_METHOD: None}).get(self.REPLICATION_METHOD) - if stream_properties[0].get( - "metadata", {self.REPLICATION_KEYS: []}).get(self.REPLICATION_KEYS, []): - - if stream in ["contacts", "companies", "deals"]: - self.assertTrue(actual_replication_method == self.INCREMENTAL, - msg="Expected INCREMENTAL replication " - "since there is a replication key") - else: - # BUG_TDL-9939 https://jira.talendforge.org/browse/TDL-9939 all streams are set to full table - pass # BUG TDL-9939 REMOVE ME WHEN BUG IS ADDRESSED - - else: - self.assertTrue(actual_replication_method == self.FULL, - msg="Expected FULL replication " - "since there is no replication key") - - 
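For context, each discovery metadata entry inspected in this test looks roughly like the following (a representative shape with invented values; the breadcrumb/inclusion lookups below assume it):

example_metadata_entry = {
    "breadcrumb": ["properties", "updatedAt"],
    "metadata": {"inclusion": "automatic"},
}
field_name = example_metadata_entry.get("breadcrumb", ["properties", None])[1]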
expected_primary_keys = self.expected_primary_keys()[stream] - expected_replication_keys = self.expected_replication_keys()[stream] - expected_automatic_fields = expected_primary_keys | expected_replication_keys - - # verify that primary, replication and foreign keys are given the inclusion of automatic in metadata. - # BUG_2 https://jira.talendforge.org/browse/TDL-9772 'inclusion' is not present for replication keys - actual_automatic_fields = {item.get("breadcrumb", ["properties", None])[1] - for item in metadata - if item.get("metadata").get("inclusion") == "automatic"} - if stream in ["contacts", "companies", "deals"]: - self.assertEqual(expected_automatic_fields, - actual_automatic_fields, - msg=f"expected {expected_automatic_fields} automatic fields but got {actual_automatic_fields}" - ) - - # verify that all other fields have inclusion of available - # This assumes there are no unsupported fields for SaaS sources - self.assertTrue( - all({item.get("metadata").get("inclusion") == "available" - for item in metadata - if item.get("breadcrumb", []) != [] - and item.get("breadcrumb", ["properties", None])[1] - not in actual_automatic_fields}), - msg="Not all non key properties are set to available in metadata") diff --git a/archive/tests/test_hubspot_interrupted_sync.py b/archive/tests/test_hubspot_interrupted_sync.py deleted file mode 100644 index 61725ab..0000000 --- a/archive/tests/test_hubspot_interrupted_sync.py +++ /dev/null @@ -1,142 +0,0 @@ -from datetime import datetime, timedelta -from time import sleep -import copy - -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner - -from base import HubspotBaseTest -from client import TestClient - - -class TestHubspotInterruptedSync1(HubspotBaseTest): - """Testing interrupted syncs for streams that implement unique bookmarking logic.""" - @staticmethod - def name(): - return "tt_hubspot_sync_interrupt_1" - - def streams_to_test(self): - """expected streams minus the streams not under test""" - return {'companies', 'engagements', 'tickets'} - - def simulated_interruption(self, reference_state): - - new_state = copy.deepcopy(reference_state) - - companies_bookmark = self.timedelta_formatted( - reference_state['bookmarks']['companies']['property_hs_lastmodifieddate'], - days=-1, str_format=self.BASIC_DATE_FORMAT - ) - new_state['bookmarks']['companies']['property_hs_lastmodifieddate'] = None - new_state['bookmarks']['companies']['current_sync_start'] = companies_bookmark - - engagements_bookmark = self.timedelta_formatted( - reference_state['bookmarks']['engagements']['lastUpdated'], - days=-1, str_format=self.BASIC_DATE_FORMAT - ) - new_state['bookmarks']['engagements']['lastUpdated'] = None - new_state['bookmarks']['engagements']['current_sync_start'] = engagements_bookmark - - tickets_bookmark = self.timedelta_formatted( - reference_state['bookmarks']['tickets']['updatedAt'], - days=-1, str_format=self.BASIC_DATE_FORMAT) - new_state['bookmarks']['tickets']['updatedAt'] = tickets_bookmark - - return new_state - - def get_properties(self): - # 'start_date' : '2021-08-19T00:00:00Z' - # return {'start_date' : '2017-11-22T00:00:00Z'} - return { - 'start_date' : datetime.strftime( - datetime.today()-timedelta(days=3), self.START_DATE_FORMAT - ), - } - - def setUp(self): - self.maxDiff = None # see all output in failure - - def test_run(self): - - expected_streams = self.streams_to_test() - - conn_id = connections.ensure_connection(self) - - found_catalogs = 
self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - stream_schema - ) - - # Run sync 1 - first_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records = runner.get_records_from_target_output() - state_1 = menagerie.get_state(conn_id) - - # Update state to simulate a bookmark - new_state = self.simulated_interruption(state_1) - menagerie.set_state(conn_id, new_state) - - # run second sync - second_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records_2 = runner.get_records_from_target_output() - state_2 = menagerie.get_state(conn_id) - - # Test by Stream - for stream in expected_streams: - - with self.subTest(stream=stream): - - # gather expected values - replication_method = self.expected_replication_method()[stream] - primary_keys = self.expected_primary_keys()[stream] - - # gather replicated records - actual_record_count_2 = second_record_count_by_stream[stream] - actual_records_2 = [message['data'] - for message in synced_records_2[stream]['messages'] - if message['action'] == 'upsert'] - actual_record_count_1 = first_record_count_by_stream[stream] - actual_records_1 = [message['data'] - for message in synced_records[stream]['messages'] - if message['action'] == 'upsert'] - - # NB: There are no replication-key values on records and so we cannot confirm that the replicated - # records respect the bookmark via direct comparison. All we can do is verify syncs correspond - # to the replication methods logically by strategically setting the simulated state. 
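Concretely, the simulated_interruption() helper above rewrites the first sync's state into something like this before it is pushed back with menagerie.set_state() (timestamps invented):

injected_state = {
    "bookmarks": {
        "companies": {"property_hs_lastmodifieddate": None,
                      "current_sync_start": "2023-05-01T00:00:00.000000Z"},
        "engagements": {"lastUpdated": None,
                        "current_sync_start": "2023-05-01T00:00:00.000000Z"},
        "tickets": {"updatedAt": "2023-05-01T00:00:00.000000Z"},
    },
}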
- - if replication_method == self.INCREMENTAL: - - # get saved states - stream_replication_key = list(self.expected_replication_keys()[stream])[0] - bookmark_1 = state_1['bookmarks'][stream][stream_replication_key] - bookmark_2 = state_2['bookmarks'][stream][stream_replication_key] - - # BUG_TDL-15782 [tap-hubspot] Failure to recover from interrupted sync (engagements, companies) - if stream in {'companies', 'engagements'}: - continue # skip failing assertions - - # verify the uninterrupted sync and the simulated sync end with the same bookmark values - self.assertEqual(bookmark_1, bookmark_2) - - # trim records down to just the primary key values - sync_1_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_1] - sync_2_pks = [tuple([record[pk] for pk in primary_keys]) for record in actual_records_2] - # ensure no dupe records present - self.assertCountEqual(set(sync_1_pks), sync_1_pks) - self.assertCountEqual(set(sync_2_pks), sync_2_pks) - - # verify the records from sync 2 are a subset of the records from sync 1, since the simulated - # state does not correspond to a specific record's replication-key value - self.assertTrue(set(sync_2_pks).issubset(set(sync_1_pks))) - - else: - raise AssertionError(f"Replication method is {replication_method} for stream: {stream}") - diff --git a/archive/tests/test_hubspot_interrupted_sync_offset.py b/archive/tests/test_hubspot_interrupted_sync_offset.py deleted file mode 100644 index 891362b..0000000 --- a/archive/tests/test_hubspot_interrupted_sync_offset.py +++ /dev/null @@ -1,141 +0,0 @@ -from datetime import datetime, timedelta -from time import sleep -import copy - -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner - -from base import HubspotBaseTest -from client import TestClient - - -class TestHubspotInterruptedSyncOffsetContactLists(HubspotBaseTest): - """Testing interrupted syncs for streams that implement unique bookmarking logic.""" - @staticmethod - def name(): - return "tt_hubspot_interrupt_contact_lists" - - def streams_to_test(self): - """expected streams minus the streams not under test""" - untested = { - # Streams tested elsewhere - 'companies', # covered in TestHubspotInterruptedSync1 - 'engagements', # covered in TestHubspotInterruptedSync1 - # Feature Request | TDL-16095: [tap-hubspot] All incremental - # streams should implement the interruptible sync feature - 'forms', # TDL-16095 - 'owners', # TDL-16095 - 'workflows', # TDL-16095 - # Streams that do not apply - 'deal_pipelines', # interruptible does not apply, child of deals - 'campaigns', # unable to manually find a partial state with our test data - 'email_events', # unable to manually find a partial state with our test data - 'contacts_by_company', # interruptible does not apply, child of 'companies' - 'subscription_changes', # BUG_TDL-14938 - 'tickets' # covered in TestHubspotInterruptedSync1 - } - - return self.expected_streams() - untested - - def stream_to_interrupt(self): - return 'contact_lists' - - def state_to_inject(self): - return {'offset': {'offset': 250}} - - def get_properties(self): - return { - 'start_date' : datetime.strftime( - datetime.today()-timedelta(days=3), self.START_DATE_FORMAT - ), - } - - def setUp(self): - self.maxDiff = None # see all output in failure - - def test_run(self): - - # BUG TDL-16094 [tap-hubspot] `contacts` stream fails to recover from sync interruption - if self.stream_to_interrupt() == 'contacts': - self.skipTest("Skipping contacts TEST! 
See BUG[TDL-16094]") - - - expected_streams = self.streams_to_test() - - conn_id = connections.ensure_connection(self) - - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - stream_schema - ) - - # Run sync 1 - first_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records = runner.get_records_from_target_output() - state_1 = menagerie.get_state(conn_id) - - # Update state to simulate a bookmark - stream = self.stream_to_interrupt() - new_state = copy.deepcopy(state_1) - new_state['bookmarks'][stream] = self.state_to_inject() - new_state['currently_syncing'] = stream - menagerie.set_state(conn_id, new_state) - - # run second sync - second_record_count_by_stream = self.run_and_verify_sync(conn_id) - synced_records_2 = runner.get_records_from_target_output() - state_2 = menagerie.get_state(conn_id) - - # Verify post-iterrupted sync bookmark should be greater than or equal to interrupted sync bookmark - # since newly created test records may get updated while stream is syncing - replication_keys = self.expected_replication_keys() - for stream in state_1.get('bookmarks'): - replication_key = list(replication_keys[stream])[0] - self.assertLessEqual(state_1["bookmarks"][stream].get(replication_key), - state_2["bookmarks"][stream].get(replication_key), - msg="First sync bookmark should not be greater than the second bookmark.") - - -class TestHubspotInterruptedSyncOffsetContacts(TestHubspotInterruptedSyncOffsetContactLists): - """Testing interrupted syncs for streams that implement unique bookmarking logic.""" - @staticmethod - def name(): - return "tt_hubspot_interrupt_contacts" - - def get_properties(self): - return { - 'start_date' : '2021-10-01T00:00:00Z' - } - - - def stream_to_interrupt(self): - return 'contacts' - - def state_to_inject(self): - return {'offset': {'vidOffset': 3502}} - -class TestHubspotInterruptedSyncOffsetDeals(TestHubspotInterruptedSyncOffsetContactLists): - """Testing interrupted syncs for streams that implement unique bookmarking logic.""" - @staticmethod - def name(): - return "tt_hubspot_interrupt_deals" - - def get_properties(self): - return { - 'start_date' : '2021-10-10T00:00:00Z' - } - - def stream_to_interrupt(self): - return 'deals' - - def state_to_inject(self): - return {'property_hs_lastmodifieddate': '2021-10-13T08:32:08.383000Z', - 'offset': {'offset': 3442973342}} diff --git a/archive/tests/test_hubspot_pagination.py b/archive/tests/test_hubspot_pagination.py deleted file mode 100644 index d9a2faa..0000000 --- a/archive/tests/test_hubspot_pagination.py +++ /dev/null @@ -1,140 +0,0 @@ -from datetime import datetime -from datetime import timedelta -import time - -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner -from tap_tester.logger import LOGGER - -from client import TestClient -from base import HubspotBaseTest - - -class TestHubspotPagination(HubspotBaseTest): - - @staticmethod - def name(): - return "tt_hubspot_pagination" - - def get_properties(self): - return { - 'start_date' : datetime.strftime(datetime.today()-timedelta(days=7), self.START_DATE_FORMAT) - } - - def setUp(self): - self.maxDiff = None # see all 
output in failure - - # initialize the test client - setup_start = time.perf_counter() - test_client = TestClient(self.get_properties()['start_date']) - - # gather expectations - existing_records = dict() - limits = self.expected_page_limits() - streams = self.streams_to_test() - - # order the creation of test data for streams based on the streams under test - # this is necessary for child streams and streams that share underlying data in hubspot - if 'subscription_changes' in streams and 'email_events' in streams: - streams.remove('email_events') # we get this for free with subscription_changes - stream_to_run_last = 'contacts_by_company' # child stream depends on companyIds, must go last - if stream_to_run_last in streams: - streams.remove(stream_to_run_last) - streams = list(streams) - streams.append(stream_to_run_last) - - # generate test data if necessary, one stream at a time - for stream in streams: - - # Get all records - if stream == 'contacts_by_company': - company_ids = [company['companyId'] for company in existing_records['companies']] - existing_records[stream] = test_client.read(stream, parent_ids=company_ids) - elif stream in {'companies', 'contact_lists', 'subscription_changes', 'engagements', 'email_events'}: - existing_records[stream] = test_client.read(stream) - else: - existing_records[stream] = test_client.read(stream) - - # check if we exceed the pagination limit - LOGGER.info(f"Pagination limit set to - {limits[stream]} and total number of existing records - {len(existing_records[stream])}") - under_target = limits[stream] + 1 - len(existing_records[stream]) - LOGGER.info(f'under_target = {under_target} for {stream}') - - # if we do not exceed the limit generate more data so that we do - if under_target > 0 : - LOGGER.info(f"need to make {under_target} records for {stream} stream") - if stream in {'subscription_changes', 'email_events'}: - test_client.create(stream, subscriptions=existing_records[stream], times=under_target) - elif stream == 'contacts_by_company': - test_client.create(stream, company_ids, times=under_target) - else: - for i in range(under_target): - # create records to exceed limit - test_client.create(stream) - - setup_end = time.perf_counter() - LOGGER.info(f"Test Client took about {str(setup_end-setup_start).split('.')[0]} seconds") - - def streams_to_test(self): - """ - All streams with limits are under test - """ - streams_with_page_limits = { - stream - for stream, limit in self.expected_page_limits().items() - if limit - } - streams_to_test = streams_with_page_limits.difference({ - # updates for contacts_by_company do not get processed quickly or consistently - # via Hubspot API, unable to guarantee page limit is exceeded - 'contacts_by_company', - 'email_events', - 'subscription_changes', # BUG_TDL-14938 https://jira.talendforge.org/browse/TDL-14938 - }) - - return streams_to_test - - def test_run(self): - # Select only the expected streams tables - expected_streams = self.streams_to_test() - conn_id = connections.ensure_connection(self) - found_catalogs = self.run_and_verify_check_mode(conn_id) - - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - for catalog_entry in catalog_entries: - stream_schema = menagerie.get_annotated_schema(conn_id, catalog_entry['stream_id']) - connections.select_catalog_and_fields_via_metadata( - conn_id, - catalog_entry, - stream_schema - ) - - sync_record_count = self.run_and_verify_sync(conn_id) - sync_records = runner.get_records_from_target_output() - - - # Test by 
stream - for stream in expected_streams: - with self.subTest(stream=stream): - - record_count = sync_record_count.get(stream, 0) - - sync_messages = sync_records.get(stream, {'messages': []}).get('messages') - - primary_keys = self.expected_primary_keys().get(stream) - - # Verify the sync record count exceeds the stream's page size - stream_page_size = self.expected_page_limits()[stream] - self.assertLess(stream_page_size, record_count) - - # Verify we did not duplicate any records across pages - records_pks_set = {tuple([message.get('data').get(primary_key) - for primary_key in primary_keys]) - for message in sync_messages} - records_pks_list = [tuple([message.get('data').get(primary_key) - for primary_key in primary_keys]) - for message in sync_messages] - # records_pks_list = [message.get('data').get(primary_key) for message in sync_messages] - self.assertCountEqual(records_pks_set, records_pks_list, - msg=f"We have duplicate records for {stream}") diff --git a/archive/tests/test_hubspot_start_date.py b/archive/tests/test_hubspot_start_date.py deleted file mode 100644 index df2ac64..0000000 --- a/archive/tests/test_hubspot_start_date.py +++ /dev/null @@ -1,179 +0,0 @@ -import datetime - -import tap_tester.connections as connections -import tap_tester.menagerie as menagerie -import tap_tester.runner as runner -from tap_tester import LOGGER - -from base import HubspotBaseTest -from client import TestClient - - -STATIC_DATA_STREAMS = {'owners', 'campaigns'} - -class TestHubspotStartDate(HubspotBaseTest): - - @staticmethod - def name(): - return "tt_hubspot_start_date" - - def setUp(self): - """ - Create 1 record for every stream under test, because we must guarantee that - over time there will always be more records in the sync 1 time bin - (of start_date_1 -> now) than there are in the sync 2 time bin (of start_date_2 -> now). - """ - - LOGGER.info("running streams with creates") - streams_under_test = self.expected_streams() - {'email_events'} # we get this for free with subscription_changes - self.my_start_date = self.get_properties()['start_date'] - self.test_client = TestClient(self.my_start_date) - for stream in streams_under_test: - if stream == 'contacts_by_company': - companies_records = self.test_client.read('companies', since=self.my_start_date) - company_ids = [company['companyId'] for company in companies_records] - self.test_client.create(stream, company_ids) - else: - self.test_client.create(stream) - - def expected_streams(self): - """ - If any streams cannot have data generated programmatically, - hardcode start_dates for these streams and run the test twice. - Streams tested in TestHubspotStartDateStatic should be removed. 
- """ - return self.expected_check_streams().difference({ - 'owners', # static test data, covered in separate test - 'campaigns', # static test data, covered in separate test - }) - - - def get_properties(self, original=True): - utc_today = datetime.datetime.strftime( - datetime.datetime.utcnow(), self.START_DATE_FORMAT - ) - - if original: - return { - 'start_date' : self.timedelta_formatted(utc_today, days=-2) - } - else: - return { - 'start_date': utc_today - } - - def test_run(self): - - # SYNC 1 - conn_id = connections.ensure_connection(self) - found_catalogs = self.run_and_verify_check_mode(conn_id) - - # Select only the expected streams tables - expected_streams = self.expected_streams() - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - self.select_all_streams_and_fields(conn_id, catalog_entries) - - first_record_count_by_stream = self.run_and_verify_sync(conn_id) - first_sync_records = runner.get_records_from_target_output() - - # SYNC 2 - conn_id = connections.ensure_connection(self, original_properties=False) - found_catalogs = self.run_and_verify_check_mode(conn_id) - catalog_entries = [ce for ce in found_catalogs if ce['tap_stream_id'] in expected_streams] - self.select_all_streams_and_fields(conn_id, catalog_entries) - second_record_count_by_stream = self.run_and_verify_sync(conn_id) - second_sync_records = runner.get_records_from_target_output() - - # Test by stream - for stream in self.expected_streams(): - with self.subTest(stream=stream): - - # gather expectations - start_date_1 = self.get_properties()['start_date'] - start_date_2 = self.get_properties(original=False)['start_date'] - primary_keys = self.expected_primary_keys()[stream] - replication_key = list(self.expected_replication_keys()[stream]) - - # gather results - first_sync_count = first_record_count_by_stream.get(stream, 0) - second_sync_count = second_record_count_by_stream.get(stream, 0) - first_sync_messages = first_sync_records.get(stream, {'messages': []}).get('messages') - second_sync_messages = second_sync_records.get(stream, {'messages': []}).get('messages') - first_sync_primary_keys = set(tuple([record['data'][pk] for pk in primary_keys]) - for record in first_sync_messages) - second_sync_primary_keys = set(tuple([record['data'][pk] for pk in primary_keys]) - for record in second_sync_messages) - - if self.expected_metadata()[stream][self.OBEYS_START_DATE]: - - # Verify sync 2 overlaps with sync 1 - self.assertFalse(first_sync_primary_keys.isdisjoint(second_sync_primary_keys), - msg='There should be a shared set of data from start date 2 through sync execution time.') - - # Verify the second sync has less data - self.assertGreater(first_sync_count, second_sync_count) - - # for incrmental streams we can compare records agains the start date - if replication_key and stream not in {'contacts', 'subscription_changes', 'email_events'}: # BUG_TDL-9939 - - # BUG_TDL-9939 replication key is not listed correctly - if stream in {"campaigns", "companies", "contacts_by_company", "deal_pipelines", "deals"}: - # For deals stream, the replication key is already prefixed with 'property_'. 
- replication_key = [replication_key[0]] if stream in ["deals", "companies"] else [f'property_{replication_key[0]}'] - first_sync_replication_key_values = [record['data'][replication_key[0]]['value'] - for record in first_sync_messages] - second_sync_replication_key_values = [record['data'][replication_key[0]]['value'] - for record in second_sync_messages] - else: - first_sync_replication_key_values = [record['data'][replication_key[0]] for record in first_sync_messages] - second_sync_replication_key_values = [record['data'][replication_key[0]] for record in second_sync_messages] - formatted_start_date_1 = start_date_1.replace('Z', '.000000Z') - formatted_start_date_2 = start_date_2.replace('Z', '.000000Z') - - # Verify the replication key values are greater than or equal to the start date - # for sync 1 - for value in first_sync_replication_key_values: - self.assertGreaterEqual(value, formatted_start_date_1) - # and for sync 2 - for value in second_sync_replication_key_values: - self.assertGreaterEqual(value, formatted_start_date_2) - else: - - # If the start date is not obeyed, then verify the syncs are equal - self.assertEqual(first_sync_count, second_sync_count) - self.assertEqual(first_sync_primary_keys, second_sync_primary_keys) - - # Verify records are replicated for both syncs - self.assertGreater(first_sync_count, 0, - msg='start date usage is not confirmed when no records are replicated') - self.assertGreater(second_sync_count, 0, - msg='start date usage is not confirmed when no records are replicated') - - -class TestHubspotStartDateStatic(TestHubspotStartDate): - @staticmethod - def name(): - return "tt_hubspot_start_date_static" - - def expected_streams(self): - """expected streams minus the streams not under test""" - return { - 'owners', - 'campaigns', - } - - def get_properties(self, original=True): - utc_today = datetime.datetime.strftime( - datetime.datetime.utcnow(), self.START_DATE_FORMAT - ) - - if original: - return {'start_date' : '2017-11-22T00:00:00Z'} - - else: - return { - 'start_date' : '2022-02-25T00:00:00Z' - } - - def setUp(self): - LOGGER.info("running streams with no creates") diff --git a/archive/tests/unittests/test_deals.py b/archive/tests/unittests/test_deals.py deleted file mode 100644 index 46b97fe..0000000 --- a/archive/tests/unittests/test_deals.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -Unit tests for the functions needed to run `sync_deals` -""" -import os -import unittest -from tap_hubspot import acquire_access_token_from_refresh_token -from tap_hubspot import CONFIG -from tap_hubspot import gen_request -from tap_hubspot import get_url -from tap_hubspot import merge_responses -from tap_hubspot import process_v3_deals_records - - -class TestDeals(unittest.TestCase): - """ - This class gets an access token for the tests to use and then tests - assumptions we have about the tap - """ - def setUp(self): - """ - This function reads in the variables needed to get an access token - """ - CONFIG['redirect_uri'] = os.environ['HUBSPOT_REDIRECT_URI'] - CONFIG['refresh_token'] = os.environ['HUBSPOT_REFRESH_TOKEN'] - CONFIG['client_id'] = os.environ['HUBSPOT_CLIENT_ID'] - CONFIG['client_secret'] = os.environ['HUBSPOT_CLIENT_SECRET'] - - acquire_access_token_from_refresh_token() - - - def test_can_fetch_hs_date_entered_props(self): - """ - This test is written on the assumption that `sync_deals()` calls - `gen_request()` to get records - """ - state = {} - url = get_url('deals_all') - params = {'count': 250, - 'includeAssociations': False, - 'properties' : []} - 
v3_fields = ['hs_date_entered_appointmentscheduled'] - - records = list( - gen_request(state, 'deals', url, params, 'deals', "hasMore", ["offset"], ["offset"], v3_fields=v3_fields) - ) - - for record in records: - # The test account has a deal stage called "appointment scheduled" - value = record.get('properties',{}).get('hs_date_entered_appointmentscheduled') - error_msg = ('Could not find "hs_date_entered_appointmentscheduled" ' - 'in {}').format(record) - self.assertIsNotNone(value, msg=error_msg) - - def test_process_v3_deals_records(self): - self.maxDiff = None - data = [ - {'properties': {'field1': 'value1', - 'field2': 'value2', - 'hs_date_entered_field3': 'value3', - 'hs_date_exited_field4': 'value4',}}, - ] - - expected = [ - {'properties': {'hs_date_entered_field3': {'value': 'value3'}, - 'hs_date_exited_field4': {'value': 'value4'},}}, - ] - - actual = process_v3_deals_records(data) - - self.assertDictEqual(expected[0]['properties'], actual[0]['properties']) - - def test_merge_responses(self): - v1_resp = [ - {'dealId': '1', - 'properties': {'field1': 'value1',}}, - {'dealId': '2', - 'properties': {'field3': 'value3',}}, - ] - - v3_resp = [ - {'id': '1', - 'properties': {'field2': 'value2',}}, - {'id': '2', - 'properties': {'field4': 'value4',}}, - ] - - expected = [ - {'dealId': '1', - 'properties': {'field1': 'value1', - 'field2': 'value2',}}, - {'dealId': '2', - 'properties': {'field3': 'value3', - 'field4': 'value4',}}, - ] - - merge_responses(v1_resp, v3_resp) - - for expected_record in expected: - for actual_record in v1_resp: - if actual_record['dealId'] == expected_record['dealId']: - self.assertDictEqual(expected_record, actual_record) From 307359ed83624caa4a5ef5e97ed2369baf8afb8d Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 13 Jun 2023 16:33:23 +0530 Subject: [PATCH 025/105] added comments and removed functions --- tap_hubspot_sdk/client.py | 62 +--- tap_hubspot_sdk/streams.py | 590 +++++++++++++++++++++++++++++++++++++ tap_hubspot_sdk/tap.py | 4 +- 3 files changed, 598 insertions(+), 58 deletions(-) diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py index f61d3ab..42c4b78 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot_sdk/client.py @@ -2,15 +2,13 @@ from __future__ import annotations -import json - import sys from pathlib import Path from typing import Any, Callable, Iterable import requests from singer_sdk.helpers.jsonpath import extract_jsonpath -from singer_sdk.pagination import BaseAPIPaginator # noqa: TCH002 +from singer_sdk.pagination import BaseAPIPaginator from singer_sdk.streams import RESTStream from tap_hubspot_sdk.auth import tapHubspotAuthenticator @@ -20,8 +18,7 @@ else: from cached_property import cached_property -from singer_sdk.authenticators import BearerTokenAuthenticator, SimpleAuthenticator, BasicAuthenticator, APIAuthenticatorBase -from requests.auth import HTTPBasicAuth +from singer_sdk.authenticators import BearerTokenAuthenticator, SimpleAuthenticator, APIAuthenticatorBase _Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] #SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") @@ -32,13 +29,16 @@ class HubspotStream(RESTStream): @property def url_base(self) -> str: + """ + Returns the base URL + """ base_url = "https://api.hubapi.com/" return base_url records_jsonpath = "$[*]" # Or override `parse_response`. # Set this value or override `get_new_paginator`.
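[Editor's note: the `records_jsonpath = "$[*]"` default above treats the entire response body as a flat JSON array of records. A quick illustration of how the SDK's `extract_jsonpath` helper, already imported in this module, applies such expressions; the payloads are made up for the example:]

```python
from singer_sdk.helpers.jsonpath import extract_jsonpath

# "$[*]" yields each element of a top-level JSON array.
payload = [{"id": "1"}, {"id": "2"}]
assert list(extract_jsonpath("$[*]", input=payload)) == payload

# "$.results[*]" would instead descend into a "results" envelope.
wrapped = {"results": [{"id": "3"}]}
assert list(extract_jsonpath("$.results[*]", input=wrapped)) == [{"id": "3"}]
```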
- next_page_token_jsonpath = "$.next_page" # noqa: S105 + next_page_token_jsonpath = "$.next_page" @cached_property def authenticator(self) -> _Auth: @@ -92,7 +92,7 @@ def get_new_paginator(self) -> BaseAPIPaginator: def get_url_params( self, - context: dict | None, # noqa: ARG002 + context: dict | None, next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -111,52 +111,4 @@ def get_url_params( params["sort"] = "asc" params["order_by"] = self.replication_key return params - - def prepare_request_payload( - self, - context: dict | None, # noqa: ARG002 - next_page_token: Any | None, # noqa: ARG002 - ) -> dict | None: - """Prepare the data payload for the REST API request. - - By default, no payload will be sent (return None). - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary with the JSON body for a POST requests. - """ - # TODO: Delete this method if no payload is required. (Most REST APIs.) - return None - - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - # TODO: Parse response body and return a set of records. - yield from extract_jsonpath(self.records_jsonpath, input=response.json()) - - def post_process( - self, - row: dict, - context: dict | None = None, # noqa: ARG002 - ) -> dict | None: - """As needed, append or transform raw data to match expected structure. - - Args: - row: An individual record from the stream. - context: The stream context. - - Returns: - The updated record dictionary, or ``None`` to skip the record. - """ - # TODO: Delete this method if not needed. 
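[Editor's note: with the template's generic `parse_response` deleted here, the SDK's built-in default, driven by `records_jsonpath`, takes over unless a stream overrides it. Most HubSpot v3 list endpoints wrap records in a `results` array next to a `paging` object, which is why the stream classes later in this patch supply overrides along the following lines. This is a method sketch to drop into a stream class, not the exact code the patch adds:]

```python
from typing import Iterable

import requests
from singer_sdk.helpers.jsonpath import extract_jsonpath


def parse_response(self, response: requests.Response) -> Iterable[dict]:
    # HubSpot v3 list endpoints answer with {"results": [...], "paging": {...}};
    # yield each element of the "results" array as one record.
    yield from extract_jsonpath("$.results[*]", input=response.json())
```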
- return row \ No newline at end of file diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index 13cbbba..d6a5fa1 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -110,6 +110,9 @@ class ListsStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated path which has the api version + """ version = self.config.get("api_version_1", "") base_url = "https://api.hubapi.com/contacts/{}".format(version) return base_url @@ -196,6 +199,9 @@ class UsersStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/settings/{}".format(version) return base_url @@ -283,6 +289,9 @@ class OwnersStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -372,6 +381,9 @@ class TicketPipelineStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_1", "") base_url = "https://api.hubapi.com/crm-pipelines/{}".format(version) return base_url @@ -461,6 +473,9 @@ class DealPipelineStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_1", "") base_url = "https://api.hubapi.com/crm-pipelines/{}".format(version) return base_url @@ -550,6 +565,9 @@ class EmailSubscriptionStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_1", "") base_url = "https://api.hubapi.com/email/public/{}".format(version) return base_url @@ -648,6 +666,9 @@ class PropertyTicketStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -697,6 +718,9 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ row["hubspot_object"] = "ticket" @@ -753,6 +777,9 @@ class PropertyDealStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -779,6 +806,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "deal" except: @@ -836,6 +867,9 @@ class PropertyContactStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -862,6 +896,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added 
columns + """ + try: row["hubspot_object"] = "contact" except: @@ -919,6 +957,9 @@ class PropertyCompanyStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -945,6 +986,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "company" except: @@ -1002,6 +1047,9 @@ class PropertyProductStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1028,6 +1076,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "product" except: @@ -1085,6 +1137,9 @@ class PropertyLineItemStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1111,6 +1166,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "line_item" except: @@ -1168,6 +1227,9 @@ class PropertyEmailStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1194,6 +1256,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "email" except: @@ -1251,6 +1317,9 @@ class PropertyPostalMailStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1277,6 +1346,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "postal_mail" except: @@ -1334,6 +1407,9 @@ class PropertyCallStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1360,6 +1436,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "call" except: @@ -1417,6 
+1497,9 @@ class PropertyMeetingStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1443,6 +1526,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "meeting" except: @@ -1500,6 +1587,9 @@ class PropertyTaskStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1526,6 +1616,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "task" except: @@ -1583,6 +1677,9 @@ class PropertyCommunicationStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1609,6 +1706,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "communication" except: @@ -1666,6 +1767,9 @@ class PropertyNotesStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1692,6 +1796,10 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["hubspot_object"] = "note" except: @@ -1778,6 +1886,9 @@ class AssociationContactCompanyTypeStream(HubspotStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -1830,11 +1941,18 @@ class AssociationContactCompanyLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "contact" row["to_object_type"] = "company" @@ -1870,11 +1988,18 @@ class AssociationDealContactTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns 
api records with added columns + """ + try: row["from_object_type"] = "deal" row["to_object_type"] = "contact" @@ -1910,11 +2035,18 @@ class AssociationDealContactLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "deal" row["to_object_type"] = "contact" @@ -1950,11 +2082,18 @@ class AssociationDealCompanyTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "deal" row["to_object_type"] = "company" @@ -1990,11 +2129,18 @@ class AssociationDealCompanyLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "deal" row["to_object_type"] = "company" @@ -2030,11 +2176,18 @@ class AssociationTicketContactTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "contact" @@ -2070,11 +2223,18 @@ class AssociationTicketContactLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "contact" @@ -2110,11 +2270,18 @@ class AssociationTicketCompanyTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "company" @@ -2150,11 +2317,18 @@ class AssociationTicketCompanyLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = 
"https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "company" @@ -2190,11 +2364,18 @@ class AssociationTicketDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "deal" @@ -2230,11 +2411,18 @@ class AssociationTicketDealLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "deal" @@ -2270,11 +2458,18 @@ class AssociationTicketCommunicationTypeStream(AssociationContactCompanyTypeStre @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "communication" @@ -2310,11 +2505,18 @@ class AssociationTicketCommunicationLabelStream(AssociationContactCompanyTypeStr @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "communication" @@ -2350,11 +2552,18 @@ class AssociationTicketCallTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "call" @@ -2390,11 +2599,18 @@ class AssociationTicketCallLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "call" @@ -2430,11 +2646,18 @@ class AssociationTicketMeetingTypeStream(AssociationContactCompanyTypeStream): @property 
def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "meeting" @@ -2470,11 +2693,18 @@ class AssociationTicketMeetingLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "meeting" @@ -2510,11 +2740,18 @@ class AssociationTicketNoteTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "note" @@ -2550,11 +2787,18 @@ class AssociationTicketNoteLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "note" @@ -2590,11 +2834,18 @@ class AssociationTicketTaskTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "task" @@ -2630,11 +2881,18 @@ class AssociationTicketTaskLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "task" @@ -2670,11 +2928,18 @@ class AssociationTicketEmailTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] 
= "email" @@ -2710,11 +2975,18 @@ class AssociationTicketEmailLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "email" @@ -2750,11 +3022,18 @@ class AssociationTicketPostalMailTypeStream(AssociationContactCompanyTypeStream) @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "postal_mail" @@ -2790,11 +3069,18 @@ class AssociationTicketPostalMailLabelStream(AssociationContactCompanyTypeStream @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "ticket" row["to_object_type"] = "postal_mail" @@ -2830,11 +3116,18 @@ class AssociationLineItemDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "line_item" row["to_object_type"] = "deal" @@ -2870,11 +3163,18 @@ class AssociationLineItemDealLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "line_item" row["to_object_type"] = "deal" @@ -2910,11 +3210,18 @@ class AssociationCommunicationContactTypeStream(AssociationContactCompanyTypeStr @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "communication" row["to_object_type"] = "contact" @@ -2950,11 +3257,18 @@ class AssociationCommunicationContactLabelStream(AssociationContactCompanyTypeSt @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: 
dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "communication" row["to_object_type"] = "contact" @@ -2990,11 +3304,18 @@ class AssociationCommunicationCompanyTypeStream(AssociationContactCompanyTypeStr @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "communication" row["to_object_type"] = "company" @@ -3030,11 +3351,18 @@ class AssociationCommunicationCompanyLabelStream(AssociationContactCompanyTypeSt @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "communication" row["to_object_type"] = "company" @@ -3070,11 +3398,18 @@ class AssociationCommunicationDealTypeStream(AssociationContactCompanyTypeStream @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "communication" row["to_object_type"] = "deal" @@ -3110,11 +3445,18 @@ class AsociationCommunicationDealLabelStream(AssociationContactCompanyTypeStream @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "communication" row["to_object_type"] = "deal" @@ -3150,11 +3492,18 @@ class AssociationCallContactTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "call" row["to_object_type"] = "contact" @@ -3190,11 +3539,18 @@ class AssociationCallContactLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "call" row["to_object_type"] = "contact" @@ -3230,11 +3586,18 @@ class AssociationCallCompanyTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which 
has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "call" row["to_object_type"] = "company" @@ -3270,11 +3633,18 @@ class AssociationCallCompanyLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "call" row["to_object_type"] = "company" @@ -3310,11 +3680,18 @@ class AssociationCallDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "call" row["to_object_type"] = "deal" @@ -3350,11 +3727,18 @@ class AssociationCallDealLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "call" row["to_object_type"] = "deal" @@ -3390,11 +3774,18 @@ class AssociationEmailContactTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "email" row["to_object_type"] = "contact" @@ -3430,11 +3821,18 @@ class AssociationEmailContactLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "email" row["to_object_type"] = "contact" @@ -3470,11 +3868,18 @@ class AssociationEmailCompanyTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "email" row["to_object_type"] = "company" @@ -3510,11 +3915,18 @@ class 
AssociationEmailCompanyLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "email" row["to_object_type"] = "company" @@ -3550,11 +3962,18 @@ class AssociationEmailDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "email" row["to_object_type"] = "deal" @@ -3590,11 +4009,18 @@ class AssociationEmailDealLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "email" row["to_object_type"] = "deal" @@ -3630,11 +4056,18 @@ class AssociationMeetingContactTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "meeting" row["to_object_type"] = "contact" @@ -3670,6 +4103,9 @@ class AssociationMeetingContactLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url @@ -3710,11 +4146,18 @@ class AssociationMeetingCompanyTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "meeting" row["to_object_type"] = "company" @@ -3750,11 +4193,18 @@ class AssociationMeetingCompanyLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "meeting" row["to_object_type"] = "company" @@ -3790,11 +4240,18 @@ class 
AssociationMeetingDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "meeting" row["to_object_type"] = "deal" @@ -3830,11 +4287,18 @@ class AssociationMeetingDealLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "meeting" row["to_object_type"] = "deal" @@ -3870,11 +4334,18 @@ class AssociationNoteContactTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "note" row["to_object_type"] = "contact" @@ -3910,11 +4381,18 @@ class AssociationNoteContactLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "note" row["to_object_type"] = "contact" @@ -3950,11 +4428,18 @@ class AssociationNoteCompanyTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "note" row["to_object_type"] = "company" @@ -3990,11 +4475,18 @@ class AssociationNoteCompanyLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "note" row["to_object_type"] = "company" @@ -4030,11 +4522,18 @@ class AssoxationNoteDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with 
added columns + """ + try: row["from_object_type"] = "note" row["to_object_type"] = "deal" @@ -4070,11 +4569,18 @@ class AssociationNoteDealLabel(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "note" row["to_object_type"] = "deal" @@ -4110,11 +4616,18 @@ class AssociationTaskContactTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "task" row["to_object_type"] = "contact" @@ -4150,11 +4663,18 @@ class AssociationTaskContactLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "task" row["to_object_type"] = "contact" @@ -4190,11 +4710,18 @@ class AssociationTaskCompanyTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "task" row["to_object_type"] = "company" @@ -4230,11 +4757,18 @@ class AssociationTaskCompanyLabelstream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "task" row["to_object_type"] = "company" @@ -4270,11 +4804,18 @@ class AssociationTaskDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "task" row["to_object_type"] = "deal" @@ -4310,11 +4851,18 @@ class AssociationTaskDealLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def 
post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "task" row["to_object_type"] = "deal" @@ -4350,11 +4898,18 @@ class AssociationPostalMailContactTypeStream(AssociationContactCompanyTypeStream @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "contact" @@ -4390,11 +4945,18 @@ class AssociationPostalMailContactLabelStream(AssociationContactCompanyTypeStrea @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "contact" @@ -4430,11 +4992,18 @@ class AssociationPostalMailCompanyTypeStream(AssociationContactCompanyTypeStream @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "company" @@ -4470,11 +5039,18 @@ class AssociationPostalMailCompanyLabelStream(AssociationContactCompanyTypeStrea @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "company" @@ -4510,11 +5086,18 @@ class AssociationPostalMailDealTypeStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "deal" @@ -4550,11 +5133,18 @@ class AssociationPostalMailDealLabelStream(AssociationContactCompanyTypeStream): @property def url_base(self) -> str: + """ + Returns an updated which has the api version + """ version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: + """ + Returns api records with added columns + """ + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "deal" diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py index 9b18cee..2c2a493 100644 --- a/tap_hubspot_sdk/tap.py +++ 
b/tap_hubspot_sdk/tap.py @@ -5,7 +5,6 @@ from singer_sdk import Tap from singer_sdk import typing as th # JSON schema typing helpers -# TODO: Import your custom stream types here: from tap_hubspot_sdk import streams @@ -14,7 +13,6 @@ class TapHubspot(Tap): name = "tap-hubspot-sdk" - # TODO: Update this section with the actual config values you expect: config_jsonschema = th.PropertiesList( th.Property( "access_token", @@ -44,7 +42,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.PropertyNotesStream(self), streams.AssociationPostalMailDealLabelStream(self), ] - + if __name__ == "__main__": TapHubspot.cli() From 8f1682829f67cddd1ee7f502118a8f5c9852efeb Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Tue, 13 Jun 2023 11:19:05 -0400 Subject: [PATCH 026/105] add .meltano to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 7d2cc21..650746d 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ share/python-wheels/ .installed.cfg *.egg .idea +.meltano MANIFEST # PyInstaller From fa5e00216a960f5567f3a3480744d223682791cf Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Tue, 13 Jun 2023 11:24:26 -0400 Subject: [PATCH 027/105] Poetry Black Reformatting --- tap_hubspot_sdk/client.py | 37 +- tap_hubspot_sdk/streams.py | 1126 ++++++++++++++++++++---------------- tap_hubspot_sdk/tap.py | 2 +- 3 files changed, 660 insertions(+), 505 deletions(-) diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py index 42c4b78..f08fdfc 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot_sdk/client.py @@ -18,10 +18,14 @@ else: from cached_property import cached_property -from singer_sdk.authenticators import BearerTokenAuthenticator, SimpleAuthenticator, APIAuthenticatorBase +from singer_sdk.authenticators import ( + BearerTokenAuthenticator, + SimpleAuthenticator, + APIAuthenticatorBase, +) _Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] -#SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") +# SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") class HubspotStream(RESTStream): @@ -38,7 +42,7 @@ def url_base(self) -> str: records_jsonpath = "$[*]" # Or override `parse_response`. # Set this value or override `get_new_paginator`. 
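[Editor's note: the hunks in this commit are mechanical Black output: single-line imports gain parentheses and trailing commas, long calls split one argument per line, and `#comment` is normalized to `# comment`. Assuming Black is installed as a dev dependency of the Poetry project, the same pass can be reproduced locally with:]

```bash
poetry run black tap_hubspot_sdk
```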
- next_page_token_jsonpath = "$.next_page" + next_page_token_jsonpath = "$.next_page" @cached_property def authenticator(self) -> _Auth: @@ -52,16 +56,26 @@ def authenticator(self) -> _Auth: auth_type = self.config.get("auth_type") if auth_type == "oauth": - return BearerTokenAuthenticator.create_for_stream(self, - token=access_token, ) - + return BearerTokenAuthenticator.create_for_stream( + self, + token=access_token, + ) + elif auth_type == "simple": - return SimpleAuthenticator(self, - auth_headers={"Authorization": "Bearer {}".format(access_token),},) - + return SimpleAuthenticator( + self, + auth_headers={ + "Authorization": "Bearer {}".format(access_token), + }, + ) + elif auth_type == "api": - APIAuthenticatorBase.auth_headers = {"Authorization": "Bearer {}".format(access_token),} - return APIAuthenticatorBase(self,) + APIAuthenticatorBase.auth_headers = { + "Authorization": "Bearer {}".format(access_token), + } + return APIAuthenticatorBase( + self, + ) @property def http_headers(self) -> dict: @@ -111,4 +125,3 @@ def get_url_params( params["sort"] = "asc" params["order_by"] = self.replication_key return params - \ No newline at end of file diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index d6a5fa1..4a6ba85 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -32,7 +32,7 @@ class ListsStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ vid, canonical-vid, merged-vids, portal-id, is-contact, properties """ @@ -49,63 +49,63 @@ class ListsStream(HubspotStream): Property("merged-vids", ArrayType(StringType)), Property("portal-id", IntegerType), Property("is-contact", BooleanType), - Property("properties", - ObjectType(Property("lastmodifieddate", StringType), - Property("email", StringType), - Property("message", StringType), - Property("city", StringType), - Property("company", StringType), - Property("createddate", StringType), - Property("firstname", StringType), - Property("hs_all_contact_vids", IntegerType), - Property("hs_date_entered_lead", StringType), - Property("hs_marketable_reason_id", StringType), - Property("hs_is_unworked", BooleanType), - Property("hs_marketable_until_renewal", BooleanType), - Property("hs_latest_source_timestamp", StringType), - Property("hs_marketable_reason_type", StringType), - Property("hs_marketable_status", BooleanType), - Property("hs_is_contact", BooleanType), - Property("hs_email_domain", StringType), - Property("hs_pipeline", StringType), - Property("hs_sequences_actively_enrolled_count", StringType), - Property("hs_object_id", StringType), - Property("hs_time_in_lead", StringType), - Property("num_conversion_events", StringType), - Property("num_unique_conversion_events", StringType), - Property("lastname", StringType), - Property("hs_analytics_num_page_views", StringType), - Property("hs_analytics_num_event_completions", StringType), - Property("hs_analytics_first_timestamp", StringType), - Property("hs_social_twitter_clicks", StringType), - Property("hs_analytics_num_visits", StringType), - Property("twitterprofilephoto", StringType), - Property("twitterhandle", StringType), - Property("hs_analytics_source_data_2", StringType), - Property("hs_social_facebook_clicks", StringType), - Property("hs_analytics_source", StringType), - Property("hs_analytics_source_data_1", StringType), - Property("hs_latest_source", StringType), - Property("hs_latest_source_data_1", StringType), - Property("hs_latest_source_data_2", 
StringType), - Property("hs_social_google_plus_clicks", StringType), - Property("hs_social_num_broadcast_clicks", StringType), - Property("state", StringType), - Property("hs_social_linkedin_clicks", StringType), - Property("hs_lifecyclestage_lead_date", StringType), - Property("hs_analytics_revenue", StringType), - Property("hs_analytics_average_page_views", StringType), - Property("website", StringType), - Property("lifecyclestage", StringType), - Property("jobtitle", StringType), - ) - - ), + Property( + "properties", + ObjectType( + Property("lastmodifieddate", StringType), + Property("email", StringType), + Property("message", StringType), + Property("city", StringType), + Property("company", StringType), + Property("createddate", StringType), + Property("firstname", StringType), + Property("hs_all_contact_vids", IntegerType), + Property("hs_date_entered_lead", StringType), + Property("hs_marketable_reason_id", StringType), + Property("hs_is_unworked", BooleanType), + Property("hs_marketable_until_renewal", BooleanType), + Property("hs_latest_source_timestamp", StringType), + Property("hs_marketable_reason_type", StringType), + Property("hs_marketable_status", BooleanType), + Property("hs_is_contact", BooleanType), + Property("hs_email_domain", StringType), + Property("hs_pipeline", StringType), + Property("hs_sequences_actively_enrolled_count", StringType), + Property("hs_object_id", StringType), + Property("hs_time_in_lead", StringType), + Property("num_conversion_events", StringType), + Property("num_unique_conversion_events", StringType), + Property("lastname", StringType), + Property("hs_analytics_num_page_views", StringType), + Property("hs_analytics_num_event_completions", StringType), + Property("hs_analytics_first_timestamp", StringType), + Property("hs_social_twitter_clicks", StringType), + Property("hs_analytics_num_visits", StringType), + Property("twitterprofilephoto", StringType), + Property("twitterhandle", StringType), + Property("hs_analytics_source_data_2", StringType), + Property("hs_social_facebook_clicks", StringType), + Property("hs_analytics_source", StringType), + Property("hs_analytics_source_data_1", StringType), + Property("hs_latest_source", StringType), + Property("hs_latest_source_data_1", StringType), + Property("hs_latest_source_data_2", StringType), + Property("hs_social_google_plus_clicks", StringType), + Property("hs_social_num_broadcast_clicks", StringType), + Property("state", StringType), + Property("hs_social_linkedin_clicks", StringType), + Property("hs_lifecyclestage_lead_date", StringType), + Property("hs_analytics_revenue", StringType), + Property("hs_analytics_average_page_views", StringType), + Property("website", StringType), + Property("lifecyclestage", StringType), + Property("jobtitle", StringType), + ), + ), Property("form-submissions", ArrayType(StringType)), Property("identity-profiles", ArrayType(StringType)), Property("merge-audits", ArrayType(StringType)), Property("addedAt", StringType), - ).to_dict() @property @@ -118,9 +118,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. 
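A condensed sketch of the auth_type dispatch reformatted in the client.py hunk above, shown here so the three branches are easier to compare side by side. The config keys (access_token, auth_type) and the header format are the ones the diff itself reads; the helper name pick_authenticator and everything else is illustrative, not part of the patch:

from singer_sdk.authenticators import (
    BearerTokenAuthenticator,
    SimpleAuthenticator,
)

def pick_authenticator(stream):
    # mirrors the branch in HubspotStream.authenticator: "oauth" uses the
    # SDK's bearer-token helper, "simple" builds the same header by hand
    token = stream.config.get("access_token")
    if stream.config.get("auth_type") == "oauth":
        return BearerTokenAuthenticator.create_for_stream(stream, token=token)
    return SimpleAuthenticator(
        stream, auth_headers={"Authorization": "Bearer {}".format(token)}
    )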
@@ -136,13 +136,61 @@ def get_url_params( params["page"] = next_page_token if self.replication_key: params["sort"] = "asc" - params["order_by"] = self.replication_key + params["order_by"] = self.replication_key - params["property"] = "message","email","city","company","createddate","firstname","hs_all_contact_vids","hs_date_entered_lead","hs_marketable_reason_id","hs_is_unworked","hs_marketable_until_renewal","hs_latest_source_timestamp","hs_marketable_reason_type","hs_marketable_status","hs_is_contact","hs_email_domain","hs_pipeline","hs_sequences_actively_enrolled_count","hs_object_id","hs_time_in_lead","num_conversion_events","num_unique_conversion_events","lastname","hs_analytics_num_page_views","hs_analytics_num_event_completions","hs_analytics_first_timestamp","hs_social_twitter_clicks","hs_analytics_num_visits","twitterprofilephoto","twitterhandle","hs_analytics_source_data_2","hs_social_facebook_clicks","hs_analytics_source","hs_analytics_source_data_1","hs_latest_source","hs_latest_source_data_1","hs_latest_source_data_2","hs_social_google_plus_clicks","hs_social_num_broadcast_clicks","state","hs_social_linkedin_clicks","hs_lifecyclestage_lead_date","hs_analytics_revenue","hs_analytics_average_page_views","website","lifecyclestage","jobtitle" + params["property"] = ( + "message", + "email", + "city", + "company", + "createddate", + "firstname", + "hs_all_contact_vids", + "hs_date_entered_lead", + "hs_marketable_reason_id", + "hs_is_unworked", + "hs_marketable_until_renewal", + "hs_latest_source_timestamp", + "hs_marketable_reason_type", + "hs_marketable_status", + "hs_is_contact", + "hs_email_domain", + "hs_pipeline", + "hs_sequences_actively_enrolled_count", + "hs_object_id", + "hs_time_in_lead", + "num_conversion_events", + "num_unique_conversion_events", + "lastname", + "hs_analytics_num_page_views", + "hs_analytics_num_event_completions", + "hs_analytics_first_timestamp", + "hs_social_twitter_clicks", + "hs_analytics_num_visits", + "twitterprofilephoto", + "twitterhandle", + "hs_analytics_source_data_2", + "hs_social_facebook_clicks", + "hs_analytics_source", + "hs_analytics_source_data_1", + "hs_latest_source", + "hs_latest_source_data_1", + "hs_latest_source_data_2", + "hs_social_google_plus_clicks", + "hs_social_num_broadcast_clicks", + "state", + "hs_social_linkedin_clicks", + "hs_lifecyclestage_lead_date", + "hs_analytics_revenue", + "hs_analytics_average_page_views", + "website", + "lifecyclestage", + "jobtitle", + ) params["propertyMode"] = "value_and_history" return params - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. 
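Taken together, the hooks in this file form the SDK's request loop: the value matched by next_page_token_jsonpath on one response is fed back into get_url_params as the page parameter for the next request, and parse_response turns each payload into records. A minimal sketch of the loop's two ends; get_url_params follows the hunk above, while the parse_response body shown is the SDK-default jsonpath extraction, assumed here because the context lines do not show each stream's full implementation:

from singer_sdk.helpers.jsonpath import extract_jsonpath

def get_url_params(self, context, next_page_token):
    params = {}
    if next_page_token:
        # token matched by next_page_token_jsonpath on the prior response
        params["page"] = next_page_token
    if self.replication_key:
        params["sort"] = "asc"
        params["order_by"] = self.replication_key
    return params

def parse_response(self, response):
    # records_jsonpath defaults to "$[*]" in this tap's base class
    yield from extract_jsonpath(self.records_jsonpath, input=response.json())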
@@ -163,7 +211,8 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results - + + class UsersStream(HubspotStream): """ @@ -178,7 +227,7 @@ class UsersStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, email, roleIds, primaryteamid """ @@ -186,15 +235,14 @@ class UsersStream(HubspotStream): name = "users" path = "/users?fields={}".format(columns) primary_keys = ["id"] - #replication_key = "LastModifiedDate" - #replication_method = "incremental" + # replication_key = "LastModifiedDate" + # replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), Property("email", StringType), Property("roleIds", ArrayType(StringType)), Property("primaryteamid", StringType), - ).to_dict() @property @@ -207,9 +255,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -250,6 +298,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class OwnersStream(HubspotStream): """ @@ -264,7 +313,7 @@ class OwnersStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, email, firstName, lastName, userId, createdAt, updatedAt, archived """ @@ -284,7 +333,6 @@ class OwnersStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -297,9 +345,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -340,6 +388,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class TicketPipelineStream(HubspotStream): """ @@ -354,7 +403,7 @@ class TicketPipelineStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default """ @@ -376,7 +425,6 @@ class TicketPipelineStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("default", StringType), - ).to_dict() @property @@ -389,9 +437,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. 
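One behavior worth noting in the streams above: path is built by interpolating the triple-quoted columns string, and triple-quoted literals keep their internal whitespace, so that whitespace travels into the query string verbatim. A quick illustration using the UsersStream values (the exact newlines and indentation in the source file may differ from what is shown here):

columns = """
    id, email, roleIds, primaryteamid
"""
path = "/users?fields={}".format(columns)
# path == '/users?fields=\n    id, email, roleIds, primaryteamid\n'
# i.e. the whitespace inside the literal becomes part of the URL until
# the HTTP layer or the server normalizes it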
@@ -430,7 +478,8 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: else: results = resp_json - yield from results + yield from results + class DealPipelineStream(HubspotStream): @@ -446,7 +495,7 @@ class DealPipelineStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default """ @@ -468,7 +517,6 @@ class DealPipelineStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("default", StringType), - ).to_dict() @property @@ -481,9 +529,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -522,7 +570,8 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: else: results = resp_json - yield from results + yield from results + class EmailSubscriptionStream(HubspotStream): @@ -538,7 +587,7 @@ class EmailSubscriptionStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, portalId, name, description, active, internal, category, channel, internalName, businessUnitId """ @@ -560,7 +609,6 @@ class EmailSubscriptionStream(HubspotStream): Property("channel", StringType), Property("internalName", StringType), Property("businessUnitId", StringType), - ).to_dict() @property @@ -573,9 +621,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -616,6 +664,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class PropertyTicketStream(HubspotStream): """ @@ -630,7 +679,7 @@ class PropertyTicketStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -661,7 +710,6 @@ class PropertyTicketStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -674,9 +722,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. 
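The pipeline, subscription, and property streams above all derive url_base from a versioned config key. A sketch of the pattern, assuming a config entry like api_version_3 = "v3"; note that the empty-string default means a missing key silently yields https://api.hubapi.com/crm/ with no version segment:

@property
def url_base(self) -> str:
    # "v3" selects the CRM v3 endpoints; other streams read api_version_4
    version = self.config.get("api_version_3", "")
    return "https://api.hubapi.com/crm/{}".format(version)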
@@ -721,10 +769,11 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + row["hubspot_object"] = "ticket" - - return super().post_process(row, context) + + return super().post_process(row, context) + class PropertyDealStream(HubspotStream): @@ -740,7 +789,7 @@ class PropertyDealStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -772,7 +821,6 @@ class PropertyDealStream(HubspotStream): Property("hubspot_object", StringType), Property("calculationFormula", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -783,7 +831,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. @@ -814,9 +862,10 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: row["hubspot_object"] = "deal" except: pass - + return super().post_process(row, context) + class PropertyContactStream(HubspotStream): """ @@ -831,7 +880,7 @@ class PropertyContactStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -862,7 +911,6 @@ class PropertyContactStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -873,7 +921,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. @@ -904,9 +952,10 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: row["hubspot_object"] = "contact" except: pass - + return super().post_process(row, context) + class PropertyCompanyStream(HubspotStream): """ @@ -921,7 +970,7 @@ class PropertyCompanyStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -952,7 +1001,6 @@ class PropertyCompanyStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -963,7 +1011,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. 
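PropertyTicketStream shows the enrichment idiom that the remaining property streams repeat: post_process stamps each record with the HubSpot object type it describes before deferring to the base class. The later streams wrap the assignment in a bare try/except, which for a plain dict row can never actually raise, so the essential logic reduces to:

def post_process(self, row, context=None):
    # tag the record so the merged properties output can be filtered
    # by source object type downstream
    row["hubspot_object"] = "ticket"
    return super().post_process(row, context)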
@@ -989,14 +1037,15 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "company" except: pass - + return super().post_process(row, context) + class PropertyProductStream(HubspotStream): """ @@ -1011,7 +1060,7 @@ class PropertyProductStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1042,7 +1091,6 @@ class PropertyProductStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1053,7 +1101,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. @@ -1079,14 +1127,15 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "product" except: pass - + return super().post_process(row, context) + class PropertyLineItemStream(HubspotStream): """ @@ -1101,7 +1150,7 @@ class PropertyLineItemStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1132,7 +1181,6 @@ class PropertyLineItemStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1143,7 +1191,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. 
@@ -1169,14 +1217,15 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "line_item" except: pass - + return super().post_process(row, context) + class PropertyEmailStream(HubspotStream): """ @@ -1191,7 +1240,7 @@ class PropertyEmailStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1222,7 +1271,6 @@ class PropertyEmailStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1233,7 +1281,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. @@ -1259,13 +1307,14 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "email" except: pass - - return super().post_process(row, context) + + return super().post_process(row, context) + class PropertyPostalMailStream(HubspotStream): @@ -1281,7 +1330,7 @@ class PropertyPostalMailStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1312,7 +1361,6 @@ class PropertyPostalMailStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1323,7 +1371,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. 
@@ -1349,14 +1397,15 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "postal_mail" except: pass - + return super().post_process(row, context) + class PropertyCallStream(HubspotStream): """ @@ -1371,7 +1420,7 @@ class PropertyCallStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1402,7 +1451,6 @@ class PropertyCallStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1413,7 +1461,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. @@ -1439,13 +1487,14 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "call" except: pass - - return super().post_process(row, context) + + return super().post_process(row, context) + class PropertyMeetingStream(HubspotStream): @@ -1461,7 +1510,7 @@ class PropertyMeetingStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1492,7 +1541,6 @@ class PropertyMeetingStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1503,7 +1551,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. 
@@ -1529,14 +1577,15 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "meeting" except: pass - + return super().post_process(row, context) - + + class PropertyTaskStream(HubspotStream): """ @@ -1551,7 +1600,7 @@ class PropertyTaskStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1582,7 +1631,6 @@ class PropertyTaskStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1593,7 +1641,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. @@ -1619,14 +1667,15 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "task" except: pass - + return super().post_process(row, context) + class PropertyCommunicationStream(HubspotStream): """ @@ -1641,7 +1690,7 @@ class PropertyCommunicationStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1672,7 +1721,6 @@ class PropertyCommunicationStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1683,7 +1731,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. 
@@ -1709,14 +1757,15 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "communication" except: pass - + return super().post_process(row, context) + class PropertyNotesStream(HubspotStream): """ @@ -1731,7 +1780,7 @@ class PropertyNotesStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField @@ -1762,7 +1811,6 @@ class PropertyNotesStream(HubspotStream): Property("formField", BooleanType), Property("hubspot_object", StringType), Property("showCurrencySymbol", StringType), - ).to_dict() @property @@ -1773,7 +1821,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. @@ -1799,55 +1847,50 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["hubspot_object"] = "note" except: pass - + return super().post_process(row, context) def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: - property_ticket = PropertyTicketStream( - self._tap, schema={"properties": {}} - ) - property_deal = PropertyDealStream( - self._tap, schema={"properties": {}} - ) - property_contact = PropertyContactStream( - self._tap, schema={"properties": {}} - ) - property_company = PropertyCompanyStream( - self._tap, schema={"properties": {}} - ) - property_product = PropertyProductStream( - self._tap, schema={"properties": {}} - ) - property_lineitem = PropertyLineItemStream( - self._tap, schema={"properties": {}} - ) - property_email = PropertyEmailStream( - self._tap, schema={"properties": {}} - ) + property_ticket = PropertyTicketStream(self._tap, schema={"properties": {}}) + property_deal = PropertyDealStream(self._tap, schema={"properties": {}}) + property_contact = PropertyContactStream(self._tap, schema={"properties": {}}) + property_company = PropertyCompanyStream(self._tap, schema={"properties": {}}) + property_product = PropertyProductStream(self._tap, schema={"properties": {}}) + property_lineitem = PropertyLineItemStream(self._tap, schema={"properties": {}}) + property_email = PropertyEmailStream(self._tap, schema={"properties": {}}) property_postalmail = PropertyPostalMailStream( self._tap, schema={"properties": {}} ) - property_call = PropertyCallStream( - self._tap, schema={"properties": {}} - ) - property_meeting = PropertyMeetingStream( - self._tap, schema={"properties": {}} - ) - property_task = PropertyTaskStream( - self._tap, schema={"properties": {}} - ) + property_call = PropertyCallStream(self._tap, schema={"properties": {}}) + property_meeting = PropertyMeetingStream(self._tap, schema={"properties": {}}) + property_task = PropertyTaskStream(self._tap, schema={"properties": {}}) property_communication = PropertyCommunicationStream( self._tap, schema={"properties": {}} ) - property_records = list(property_ticket.get_records(context)) + list(property_deal.get_records(context)) + list(property_contact.get_records(context)) + list(property_company.get_records(context)) + 
list(property_product.get_records(context)) + list(property_lineitem.get_records(context)) + list(property_email.get_records(context)) + list(property_postalmail.get_records(context)) + list(property_call.get_records(context)) + list(property_meeting.get_records(context)) + list(property_task.get_records(context)) + list(property_communication.get_records(context)) + list(super().get_records(context)) - + property_records = ( + list(property_ticket.get_records(context)) + + list(property_deal.get_records(context)) + + list(property_contact.get_records(context)) + + list(property_company.get_records(context)) + + list(property_product.get_records(context)) + + list(property_lineitem.get_records(context)) + + list(property_email.get_records(context)) + + list(property_postalmail.get_records(context)) + + list(property_call.get_records(context)) + + list(property_meeting.get_records(context)) + + list(property_task.get_records(context)) + + list(property_communication.get_records(context)) + + list(super().get_records(context)) + ) + return property_records - + + class AssociationContactCompanyTypeStream(HubspotStream): """ @@ -1862,7 +1905,7 @@ class AssociationContactCompanyTypeStream(HubspotStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -1881,7 +1924,6 @@ class AssociationContactCompanyTypeStream(HubspotStream): Property("category", StringType), Property("typeId", IntegerType), Property("label", StringType), - ).to_dict() @property @@ -1892,7 +1934,7 @@ def url_base(self) -> str: version = self.config.get("api_version_3", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def parse_response(self, response: requests.Response) -> Iterable[dict]: """Parse the response and return an iterator of result records. 
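PropertyNotesStream.get_records, reflowed above, is the fan-in point: it instantiates every other property stream with an empty schema and concatenates their record lists with its own. An equivalent, lazier formulation using itertools.chain, offered as a sketch rather than what the patch does; stream names are as in the patch, with only two children shown for brevity:

from itertools import chain

def get_records(self, context):
    children = [
        PropertyTicketStream(self._tap, schema={"properties": {}}),
        PropertyDealStream(self._tap, schema={"properties": {}}),
        # ... one instance per property stream, as in the patch
    ]
    # chain avoids materializing each child's full record list in memory
    yield from chain(
        *(child.get_records(context) for child in children),
        super().get_records(context),
    )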
@@ -1913,7 +1955,8 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results - + + class AssociationContactCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -1928,7 +1971,7 @@ class AssociationContactCompanyLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -1947,20 +1990,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "contact" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationDealContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -1975,7 +2019,7 @@ class AssociationDealContactTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -1999,15 +2043,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "deal" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationDealContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -2022,7 +2067,7 @@ class AssociationDealContactLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2041,20 +2086,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "deal" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationDealCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -2069,7 +2115,7 @@ class AssociationDealCompanyTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2093,15 +2139,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "deal" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationDealCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -2116,7 +2163,7 @@ class AssociationDealCompanyLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2135,20 +2182,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "deal" row["to_object_type"] = 
"company" except: pass - + return super().post_process(row, context) + class AssociationTicketContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -2163,7 +2211,7 @@ class AssociationTicketContactTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2187,15 +2235,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationTicketContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -2210,7 +2259,7 @@ class AssociationTicketContactLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2229,20 +2278,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationTicketCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -2257,7 +2307,7 @@ class AssociationTicketCompanyTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2281,15 +2331,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) - + + class AssociationTicketCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -2304,7 +2355,7 @@ class AssociationTicketCompanyLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2323,20 +2374,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "company" except: pass - - return super().post_process(row, context) - + + return super().post_process(row, context) + + class AssociationTicketDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -2351,7 +2403,7 @@ class AssociationTicketDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2375,15 +2427,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) - + + class AssociationTicketDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -2398,7 +2451,7 @@ 
class AssociationTicketDealLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2417,19 +2470,20 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "deal" except: pass - - return super().post_process(row, context) + + return super().post_process(row, context) + class AssociationTicketCommunicationTypeStream(AssociationContactCompanyTypeStream): @@ -2445,7 +2499,7 @@ class AssociationTicketCommunicationTypeStream(AssociationContactCompanyTypeStre primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2469,15 +2523,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "communication" except: pass - + return super().post_process(row, context) + class AssociationTicketCommunicationLabelStream(AssociationContactCompanyTypeStream): """ @@ -2492,7 +2547,7 @@ class AssociationTicketCommunicationLabelStream(AssociationContactCompanyTypeStr primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2511,20 +2566,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "communication" except: pass - + return super().post_process(row, context) + class AssociationTicketCallTypeStream(AssociationContactCompanyTypeStream): """ @@ -2539,7 +2595,7 @@ class AssociationTicketCallTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2563,15 +2619,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "call" except: pass - + return super().post_process(row, context) - + + class AssociationTicketCallLabelStream(AssociationContactCompanyTypeStream): """ @@ -2586,7 +2643,7 @@ class AssociationTicketCallLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2605,20 +2662,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "call" except: pass - + return super().post_process(row, context) + class AssociationTicketMeetingTypeStream(AssociationContactCompanyTypeStream): """ @@ -2633,7 +2691,7 @@ class 
AssociationTicketMeetingTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2657,15 +2715,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "meeting" except: pass - + return super().post_process(row, context) + class AssociationTicketMeetingLabelStream(AssociationContactCompanyTypeStream): """ @@ -2680,7 +2739,7 @@ class AssociationTicketMeetingLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2699,20 +2758,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "meeting" except: pass - + return super().post_process(row, context) + class AssociationTicketNoteTypeStream(AssociationContactCompanyTypeStream): """ @@ -2727,7 +2787,7 @@ class AssociationTicketNoteTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2751,15 +2811,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "note" except: pass - + return super().post_process(row, context) + class AssociationTicketNoteLabelStream(AssociationContactCompanyTypeStream): """ @@ -2774,7 +2835,7 @@ class AssociationTicketNoteLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2793,20 +2854,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "note" except: pass - - return super().post_process(row, context) - + + return super().post_process(row, context) + + class AssociationTicketTaskTypeStream(AssociationContactCompanyTypeStream): """ @@ -2821,7 +2883,7 @@ class AssociationTicketTaskTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2845,15 +2907,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "task" except: pass - + return super().post_process(row, context) + class AssociationTicketTaskLabelStream(AssociationContactCompanyTypeStream): """ @@ -2868,7 +2931,7 @@ class AssociationTicketTaskLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, 
label """ @@ -2887,20 +2950,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "task" except: pass - + return super().post_process(row, context) + class AssociationTicketEmailTypeStream(AssociationContactCompanyTypeStream): """ @@ -2915,7 +2979,7 @@ class AssociationTicketEmailTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -2939,15 +3003,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "email" except: pass - + return super().post_process(row, context) + class AssociationTicketEmailLabelStream(AssociationContactCompanyTypeStream): """ @@ -2962,7 +3027,7 @@ class AssociationTicketEmailLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -2981,20 +3046,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "email" except: pass - + return super().post_process(row, context) + class AssociationTicketPostalMailTypeStream(AssociationContactCompanyTypeStream): """ @@ -3009,7 +3075,7 @@ class AssociationTicketPostalMailTypeStream(AssociationContactCompanyTypeStream) primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3033,15 +3099,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "postal_mail" except: pass - + return super().post_process(row, context) + class AssociationTicketPostalMailLabelStream(AssociationContactCompanyTypeStream): """ @@ -3056,7 +3123,7 @@ class AssociationTicketPostalMailLabelStream(AssociationContactCompanyTypeStream primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3075,20 +3142,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "ticket" row["to_object_type"] = "postal_mail" except: pass - + return super().post_process(row, context) + class AssociationLineItemDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -3103,7 +3171,7 @@ class AssociationLineItemDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3127,15 +3195,16 @@ def post_process(self, row: dict, 
context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "line_item" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationLineItemDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -3150,7 +3219,7 @@ class AssociationLineItemDealLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3169,20 +3238,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "line_item" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationCommunicationContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -3197,7 +3267,7 @@ class AssociationCommunicationContactTypeStream(AssociationContactCompanyTypeStr primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3221,15 +3291,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "communication" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationCommunicationContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -3244,7 +3315,7 @@ class AssociationCommunicationContactLabelStream(AssociationContactCompanyTypeSt primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3263,20 +3334,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "communication" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationCommunicationCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -3291,7 +3363,7 @@ class AssociationCommunicationCompanyTypeStream(AssociationContactCompanyTypeStr primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3315,15 +3387,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "communication" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationCommunicationCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -3338,7 +3411,7 @@ class AssociationCommunicationCompanyLabelStream(AssociationContactCompanyTypeSt primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3357,20 +3430,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, 
context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "communication" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationCommunicationDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -3385,7 +3459,7 @@ class AssociationCommunicationDealTypeStream(AssociationContactCompanyTypeStream primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3409,15 +3483,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "communication" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AsociationCommunicationDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -3432,7 +3507,7 @@ class AsociationCommunicationDealLabelStream(AssociationContactCompanyTypeStream primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3451,20 +3526,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "communication" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationCallContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -3479,7 +3555,7 @@ class AssociationCallContactTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3503,15 +3579,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "call" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationCallContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -3526,7 +3603,7 @@ class AssociationCallContactLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3545,20 +3622,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "call" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationCallCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -3573,7 +3651,7 @@ class AssociationCallCompanyTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3597,15 +3675,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "call" row["to_object_type"] = "company" except: pass - + return 
super().post_process(row, context) + class AssociationCallCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -3620,7 +3699,7 @@ class AssociationCallCompanyLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3639,20 +3718,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "call" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationCallDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -3667,7 +3747,7 @@ class AssociationCallDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3691,15 +3771,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "call" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationCallDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -3714,7 +3795,7 @@ class AssociationCallDealLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3733,20 +3814,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "call" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) - + + class AssociationEmailContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -3761,7 +3843,7 @@ class AssociationEmailContactTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3785,15 +3867,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "email" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationEmailContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -3808,7 +3891,7 @@ class AssociationEmailContactLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3827,20 +3910,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "email" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class 
AssociationEmailCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -3855,7 +3939,7 @@ class AssociationEmailCompanyTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3879,15 +3963,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "email" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationEmailCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -3902,7 +3987,7 @@ class AssociationEmailCompanyLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -3921,20 +4006,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "email" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationEmailDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -3949,7 +4035,7 @@ class AssociationEmailDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -3973,15 +4059,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "email" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationEmailDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -3996,7 +4083,7 @@ class AssociationEmailDealLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4015,20 +4102,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "email" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationMeetingContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -4043,7 +4131,7 @@ class AssociationMeetingContactTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4067,15 +4155,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "meeting" row["to_object_type"] = "contact" except: pass - - return super().post_process(row, context) - + + return super().post_process(row, context) + + class AssociationMeetingContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -4090,7 +4179,7 @@ class AssociationMeetingContactLabelStream(AssociationContactCompanyTypeStream): primary_keys = 
primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4109,16 +4198,17 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: try: row["from_object_type"] = "meeting" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationMeetingCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -4133,7 +4223,7 @@ class AssociationMeetingCompanyTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4157,15 +4247,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "meeting" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationMeetingCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -4180,7 +4271,7 @@ class AssociationMeetingCompanyLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4199,20 +4290,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "meeting" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationMeetingDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -4227,7 +4319,7 @@ class AssociationMeetingDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4251,15 +4343,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "meeting" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationMeetingDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -4274,7 +4367,7 @@ class AssociationMeetingDealLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4293,20 +4386,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "meeting" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) - + + class AssociationNoteContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -4321,7 +4415,7 @@ class AssociationNoteContactTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ 
-4345,15 +4439,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "note" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationNoteContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -4368,7 +4463,7 @@ class AssociationNoteContactLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4387,20 +4482,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "note" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationNoteCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -4415,7 +4511,7 @@ class AssociationNoteCompanyTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4439,15 +4535,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "note" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationNoteCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -4462,7 +4559,7 @@ class AssociationNoteCompanyLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4481,20 +4578,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "note" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssoxationNoteDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -4509,7 +4607,7 @@ class AssoxationNoteDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4533,15 +4631,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "note" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationNoteDealLabel(AssociationContactCompanyTypeStream): """ @@ -4556,7 +4655,7 @@ class AssociationNoteDealLabel(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4575,20 +4674,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns 
api records with added columns """ - + try: row["from_object_type"] = "note" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationTaskContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -4603,7 +4703,7 @@ class AssociationTaskContactTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4627,15 +4727,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "task" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationTaskContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -4650,7 +4751,7 @@ class AssociationTaskContactLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4669,20 +4770,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "task" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationTaskCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -4697,7 +4799,7 @@ class AssociationTaskCompanyTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4721,15 +4823,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "task" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationTaskCompanyLabelstream(AssociationContactCompanyTypeStream): """ @@ -4744,7 +4847,7 @@ class AssociationTaskCompanyLabelstream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4763,20 +4866,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "task" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationTaskDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -4791,7 +4895,7 @@ class AssociationTaskDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4815,15 +4919,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "task" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationTaskDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -4838,7 
+4943,7 @@ class AssociationTaskDealLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4857,20 +4962,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "task" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationPostalMailContactTypeStream(AssociationContactCompanyTypeStream): """ @@ -4885,7 +4991,7 @@ class AssociationPostalMailContactTypeStream(AssociationContactCompanyTypeStream primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -4909,15 +5015,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationPostalMailContactLabelStream(AssociationContactCompanyTypeStream): """ @@ -4932,7 +5039,7 @@ class AssociationPostalMailContactLabelStream(AssociationContactCompanyTypeStrea primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -4951,20 +5058,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "contact" except: pass - + return super().post_process(row, context) + class AssociationPostalMailCompanyTypeStream(AssociationContactCompanyTypeStream): """ @@ -4979,7 +5087,7 @@ class AssociationPostalMailCompanyTypeStream(AssociationContactCompanyTypeStream primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -5003,15 +5111,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationPostalMailCompanyLabelStream(AssociationContactCompanyTypeStream): """ @@ -5026,7 +5135,7 @@ class AssociationPostalMailCompanyLabelStream(AssociationContactCompanyTypeStrea primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -5045,20 +5154,21 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "company" except: pass - + return super().post_process(row, context) + class AssociationPostalMailDealTypeStream(AssociationContactCompanyTypeStream): """ @@ -5073,7 +5183,7 @@ class 
AssociationPostalMailDealTypeStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ id, name """ @@ -5097,15 +5207,16 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) + class AssociationPostalMailDealLabelStream(AssociationContactCompanyTypeStream): """ @@ -5120,7 +5231,7 @@ class AssociationPostalMailDealLabelStream(AssociationContactCompanyTypeStream): primary_keys = primary keys for the table replication_key = datetime keys for replication """ - + columns = """ category, typeId, label """ @@ -5139,24 +5250,23 @@ def url_base(self) -> str: version = self.config.get("api_version_4", "") base_url = "https://api.hubapi.com/crm/{}".format(version) return base_url - + def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns """ - + try: row["from_object_type"] = "postal_mail" row["to_object_type"] = "deal" except: pass - + return super().post_process(row, context) def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: - """ - We have type and label api for id and name column and type and label api for category, typeId, and label columns + We have type and label api for id and name column and type and label api for category, typeId, and label columns We can get data from these api and merge these columns from type and label api with merge_dicts function, we can add the records from merge_dicts function to get the output """ @@ -5331,9 +5441,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: note_deal_type = AssoxationNoteDealTypeStream( self._tap, schema={"properties": {}} ) - note_deal_label = AssociationNoteDealLabel( - self._tap, schema={"properties": {}} - ) + note_deal_label = AssociationNoteDealLabel(self._tap, schema={"properties": {}}) task_contact_type = AssociationTaskContactTypeStream( self._tap, schema={"properties": {}} ) @@ -5372,7 +5480,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(contact_company_type.get_records(context)), - list(contact_company_label.get_records(context)) + list(contact_company_label.get_records(context)), ) ] @@ -5380,7 +5488,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(deal_contact_type.get_records(context)), - list(deal_contact_label.get_records(context)) + list(deal_contact_label.get_records(context)), ) ] @@ -5388,7 +5496,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(deal_company_type.get_records(context)), - list(deal_company_label.get_records(context)) + list(deal_company_label.get_records(context)), ) ] @@ -5396,7 +5504,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(ticket_contact_type.get_records(context)), - list(ticket_contact_label.get_records(context)) + list(ticket_contact_label.get_records(context)), ) ] @@ -5404,7 +5512,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(ticket_company_type.get_records(context)), - 
list(ticket_company_label.get_records(context))
+                list(ticket_company_label.get_records(context)),
             )
         ]

@@ -5412,7 +5520,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_deal_type.get_records(context)),
-                list(ticket_deal_label.get_records(context))
+                list(ticket_deal_label.get_records(context)),
             )
         ]

@@ -5420,7 +5528,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_communication_type.get_records(context)),
-                list(ticket_communication_label.get_records(context))
+                list(ticket_communication_label.get_records(context)),
             )
         ]

@@ -5428,7 +5536,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_call_type.get_records(context)),
-                list(ticket_call_label.get_records(context))
+                list(ticket_call_label.get_records(context)),
             )
         ]

@@ -5436,7 +5544,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_meeting_type.get_records(context)),
-                list(ticket_meeting_label.get_records(context))
+                list(ticket_meeting_label.get_records(context)),
             )
         ]

@@ -5444,7 +5552,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_note_type.get_records(context)),
-                list(ticket_note_label.get_records(context))
+                list(ticket_note_label.get_records(context)),
             )
         ]

@@ -5452,7 +5560,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_task_type.get_records(context)),
-                list(ticket_task_label.get_records(context))
+                list(ticket_task_label.get_records(context)),
             )
         ]

@@ -5460,7 +5568,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_email_type.get_records(context)),
-                list(ticket_email_label.get_records(context))
+                list(ticket_email_label.get_records(context)),
             )
         ]

@@ -5468,7 +5576,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(ticket_postal_type.get_records(context)),
-                list(ticket_postal_label.get_records(context))
+                list(ticket_postal_label.get_records(context)),
             )
         ]

@@ -5476,7 +5584,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(line_deal_type.get_records(context)),
-                list(line_deal_label.get_records(context))
+                list(line_deal_label.get_records(context)),
            )
         ]

@@ -5484,7 +5592,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(communication_contact_type.get_records(context)),
-                list(communication_contact_label.get_records(context))
+                list(communication_contact_label.get_records(context)),
             )
         ]

@@ -5492,7 +5600,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(communication_company_type.get_records(context)),
-                list(communication_company_label.get_records(context))
+                list(communication_company_label.get_records(context)),
             )
         ]

@@ -5500,7 +5608,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(communication_deal_type.get_records(context)),
-                list(communication_deal_label.get_records(context))
+                list(communication_deal_label.get_records(context)),
             )
         ]

@@ -5508,7 +5616,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(call_contact_type.get_records(context)),
-                list(call_contact_label.get_records(context))
+                list(call_contact_label.get_records(context)),
             )
         ]

@@ -5516,7 +5624,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(call_company_type.get_records(context)),
-                list(call_company_label.get_records(context))
+                list(call_company_label.get_records(context)),
             )
         ]

@@ -5524,7 +5632,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(call_deal_type.get_records(context)),
-                list(call_deal_label.get_records(context))
+                list(call_deal_label.get_records(context)),
             )
         ]

@@ -5532,7 +5640,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(email_contact_type.get_records(context)),
-                list(email_contact_label.get_records(context))
+                list(email_contact_label.get_records(context)),
             )
         ]

@@ -5540,7 +5648,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(email_company_type.get_records(context)),
-                list(email_company_label.get_records(context))
+                list(email_company_label.get_records(context)),
             )
         ]

@@ -5548,7 +5656,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(email_deal_type.get_records(context)),
-                list(email_deal_label.get_records(context))
+                list(email_deal_label.get_records(context)),
             )
         ]

@@ -5556,7 +5664,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(meeting_contact_type.get_records(context)),
-                list(meeting_contact_label.get_records(context))
+                list(meeting_contact_label.get_records(context)),
             )
         ]

@@ -5564,7 +5672,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(meeting_company_type.get_records(context)),
-                list(meeting_company_label.get_records(context))
+                list(meeting_company_label.get_records(context)),
             )
         ]

@@ -5572,7 +5680,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(meeting_deal_type.get_records(context)),
-                list(meeting_deal_label.get_records(context))
+                list(meeting_deal_label.get_records(context)),
             )
         ]

@@ -5580,7 +5688,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(note_contact_type.get_records(context)),
-                list(note_contact_label.get_records(context))
+                list(note_contact_label.get_records(context)),
             )
         ]

@@ -5588,7 +5696,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(note_company_type.get_records(context)),
-                list(note_company_label.get_records(context))
+                list(note_company_label.get_records(context)),
             )
         ]

@@ -5596,7 +5704,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
             self.merge_dicts(x, y)
             for x, y in zip(
                 list(note_deal_type.get_records(context)),
-                list(note_deal_label.get_records(context))
+                list(note_deal_label.get_records(context)),
             )
         ]

@@ -5604,7 +5712,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
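# Illustrative aside on the pattern in the surrounding hunks (a sketch under
# the assumption that each "type" endpoint and its matching "label" endpoint
# return equally long, identically ordered record lists): the type stream
# yields id/name records, the label stream yields category/typeId/label
# records, and the two lists are zipped pairwise and combined via the
# merge_dicts helper defined at the end of this class:
#
#     types = [{"id": "1", "name": "primary"}]                     # sample data
#     labels = [{"category": "HUBSPOT_DEFINED", "typeId": 1,
#                "label": "Primary"}]                              # sample data
#     merged = [self.merge_dicts(t, lbl) for t, lbl in zip(types, labels)]
#     # -> [{"id": "1", "name": "primary", "category": "HUBSPOT_DEFINED",
#     #      "typeId": 1, "label": "Primary"}]
#
# Caveat: zip() truncates to the shorter input, so any length mismatch between
# the two endpoints silently drops trailing records here.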
self.merge_dicts(x, y) for x, y in zip( list(task_contact_type.get_records(context)), - list(task_contact_label.get_records(context)) + list(task_contact_label.get_records(context)), ) ] @@ -5612,7 +5720,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(task_company_type.get_records(context)), - list(task_company_label.get_records(context)) + list(task_company_label.get_records(context)), ) ] @@ -5620,7 +5728,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(task_deal_type.get_records(context)), - list(task_deal_label.get_records(context)) + list(task_deal_label.get_records(context)), ) ] @@ -5628,7 +5736,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(postal_contact_type.get_records(context)), - list(postal_contact_label.get_records(context)) + list(postal_contact_label.get_records(context)), ) ] @@ -5636,7 +5744,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(postal_company_type.get_records(context)), - list(postal_company_label.get_records(context)) + list(postal_company_label.get_records(context)), ) ] @@ -5644,11 +5752,47 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: self.merge_dicts(x, y) for x, y in zip( list(postal_deal_type.get_records(context)), - list(super().get_records(context)) + list(super().get_records(context)), ) ] - association_records = contact_company_records + deal_contact_records + deal_company_records + ticket_contact_records + ticket_company_records + ticket_deal_records + ticket_communication_records + ticket_call_records + ticket_meeting_records + ticket_note_records + ticket_task_records + ticket_email_records + ticket_postal_records + line_deal_records + communication_contact_records + communication_company_records + communication_deal_records + call_contact_records + call_company_records + call_deal_records + email_contact_records + email_company_records + email_deal_records + meeting_contact_records + meeting_company_records + meeting_deal_records + note_contact_records + note_company_records + note_deal_records + task_contact_records + task_company_records + task_deal_records + postal_contact_records + postal_company_records + postal_deal_records + association_records = ( + contact_company_records + + deal_contact_records + + deal_company_records + + ticket_contact_records + + ticket_company_records + + ticket_deal_records + + ticket_communication_records + + ticket_call_records + + ticket_meeting_records + + ticket_note_records + + ticket_task_records + + ticket_email_records + + ticket_postal_records + + line_deal_records + + communication_contact_records + + communication_company_records + + communication_deal_records + + call_contact_records + + call_company_records + + call_deal_records + + email_contact_records + + email_company_records + + email_deal_records + + meeting_contact_records + + meeting_company_records + + meeting_deal_records + + note_contact_records + + note_company_records + + note_deal_records + + task_contact_records + + task_company_records + + task_deal_records + + postal_contact_records + + postal_company_records + + postal_deal_records + ) return association_records @@ -5661,5 +5805,3 @@ def merge_dicts(self, *dict_args): for dictionary in dict_args: result.update(dictionary) return result - - \ No newline at 
end of file diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot_sdk/tap.py index 2c2a493..ae01ba0 100644 --- a/tap_hubspot_sdk/tap.py +++ b/tap_hubspot_sdk/tap.py @@ -42,7 +42,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.PropertyNotesStream(self), streams.AssociationPostalMailDealLabelStream(self), ] - + if __name__ == "__main__": TapHubspot.cli() From 344d838f7eed728bee00db781ca93777694cd186 Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Wed, 14 Jun 2023 14:28:41 +0530 Subject: [PATCH 028/105] added pytest --- .github/workflows/ci_workflow.yml | 37 ++++++++++++++++++--------- mypy.ini | 6 +++++ pyproject.toml | 8 +++++- tox.ini | 42 ++++++++++++++++++++++++++++--- 4 files changed, 76 insertions(+), 17 deletions(-) create mode 100644 mypy.ini diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml index d305bf8..f57f891 100644 --- a/.github/workflows/ci_workflow.yml +++ b/.github/workflows/ci_workflow.yml @@ -7,43 +7,56 @@ on: [push] jobs: linting: + runs-on: ubuntu-latest strategy: matrix: # Only lint using the primary version used for dev - python-version: ["3.9"] + python-version: [3.9] + steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - - name: Install pipx and Poetry + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + version: 1.1.8 + - name: Install dependencies run: | - pip install pipx poetry + poetry install - name: Run lint command from tox.ini run: | - pipx run tox -e lint + poetry run tox -e lint pytest: + runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + python-version: [3.9] + steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install Poetry - run: | - pip install poetry + uses: snok/install-poetry@v1 + with: + version: 1.1.11 - name: Install dependencies run: | poetry install - name: Test with pytest + id: test_pytest + continue-on-error: true + env: + access_token: ${{ secrets.access_token }} run: | - poetry run pytest + poetry run pytest --capture=no diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..ba621de --- /dev/null +++ b/mypy.ini @@ -0,0 +1,6 @@ +[mypy] +python_version = 3.9 +warn_unused_configs = True + +[mypy-backoff.*] +ignore_missing_imports = True diff --git a/pyproject.toml b/pyproject.toml index 1287a7c..9839d80 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,11 +16,18 @@ singer-sdk = { version="^0.26.0" } fs-s3fs = { version = "^1.1.1", optional = true } requests = "^2.28.2" cached-property = "^1" # Remove after Python 3.7 support is dropped +pendulum = "^2.1.2" +black = "^23.3.0" [tool.poetry.group.dev.dependencies] pytest = "^7.2.1" singer-sdk = { version="^0.26.0", extras = ["testing"] } +[tool.isort] +profile = "black" +multi_line_output = 3 # Vertical Hanging Indent +src_paths = "tap_linkedin" + [tool.poetry.extras] s3 = ["fs-s3fs"] @@ -37,7 +44,6 @@ select = ["ALL"] src = ["tap_hubspot_sdk"] target-version = "py37" - [tool.ruff.flake8-annotations] allow-star-arg-any = true diff --git a/tox.ini b/tox.ini index 70b9e4a..0e0f5d7 100644 --- a/tox.ini +++ b/tox.ini @@ -1,19 +1,53 @@ # This file can be used to 
customize tox tests as well as other test frameworks like flake8 and mypy [tox] -envlist = py37, py38, py39, py310, py311 +envlist = py39, py310, py311 isolated_build = true [testenv] allowlist_externals = poetry +whitelist_externals = poetry + commands = poetry install -v - poetry run pytest + - poetry run pytest + - poetry run black --check tap_hubspot_sdk/ + - poetry run flake8 tap_hubspot_sdk + - poetry run pydocstyle tap_hubspot_sdk + - poetry run mypy tap_hubspot_sdk --exclude='tap_hubspot_sdk/tests' [testenv:pytest] # Run the python tests. # To execute, run `tox -e pytest` -envlist = py37, py38, py39, py310, py311 +envlist = py39, py310, py311 commands = poetry install -v - poetry run pytest + - poetry run pytest + +[testenv:format] +# Attempt to auto-resolve lint errors before they are raised. +# To execute, run `tox -e format` +commands = + poetry install -v + - poetry run black tap_hubspot_sdk/ + - poetry run isort tap_hubspot_sdk + +[testenv:lint] +# Raise an error if lint and style standards are not met. +# To execute, run `tox -e lint` +commands = + poetry install -v + - poetry run black --check --diff tap_hubspot_sdk/ + - poetry run isort --check tap_hubspot_sdk + - poetry run flake8 tap_hubspot_sdk + - poetry run pydocstyle tap_hubspot_sdk + # refer to mypy.ini for specific settings + - poetry run mypy tap_hubspot_sdk --exclude='tap_hubspot_sdk/tests' + +[flake8] +ignore = W503 +max-line-length = 88 +max-complexity = 10 + +[pydocstyle] +ignore = D105,D203,D213 From 775577b7c81b1eea596b554a52cbea8daa5dfa03 Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Wed, 14 Jun 2023 14:37:25 +0530 Subject: [PATCH 029/105] removed auth.py --- tap_hubspot_sdk/auth.py | 44 ----------------------------------------- 1 file changed, 44 deletions(-) delete mode 100644 tap_hubspot_sdk/auth.py diff --git a/tap_hubspot_sdk/auth.py b/tap_hubspot_sdk/auth.py deleted file mode 100644 index 63d6410..0000000 --- a/tap_hubspot_sdk/auth.py +++ /dev/null @@ -1,44 +0,0 @@ -"""tap-hubspot-sdk Authentication.""" - -from __future__ import annotations - -from singer_sdk.authenticators import OAuthAuthenticator, SingletonMeta - - -# The SingletonMeta metaclass makes your streams reuse the same authenticator instance. -# If this behaviour interferes with your use-case, you can remove the metaclass. -class tapHubspotAuthenticator(OAuthAuthenticator, metaclass=SingletonMeta): - """Authenticator class for tap-hubspot-sdk.""" - - @property - def oauth_request_body(self) -> dict: - """Define the OAuth request body for the AutomaticTestTap API. - - Returns: - A dict with the request body - """ - # TODO: Define the request body needed for the API. - return { - "resource": "https://analysis.windows.net/powerbi/api", - "scope": self.oauth_scopes, - "client_id": self.config["client_id"], - "username": self.config["username"], - "password": self.config["password"], - "grant_type": "password", - } - - @classmethod - def create_for_stream(cls, stream) -> tapHubspotAuthenticator: # noqa: ANN001 - """Instantiate an authenticator for a specific Singer stream. - - Args: - stream: The Singer stream instance. - - Returns: - A new authenticator. 
- """ - return cls( - stream=stream, - auth_endpoint="TODO: OAuth Endpoint URL", - oauth_scopes="TODO: OAuth Scopes", - ) From 6cd4579f5aab8553cb21f8b70c55968bae69e55f Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Wed, 14 Jun 2023 16:27:55 +0530 Subject: [PATCH 030/105] added tests --- .github/dependabot.yml | 26 ++++++++++++++++ .github/workflows/ci_workflow.yml | 5 +++ .github/workflows/constraints.txt | 5 +++ .github/workflows/release.yaml | 51 +++++++++++++++++++++++++++++++ tap_hubspot_sdk/client.py | 2 -- tests/__init__.py | 2 +- tests/test_core.py | 18 ++++------- 7 files changed, 94 insertions(+), 15 deletions(-) create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/constraints.txt create mode 100644 .github/workflows/release.yaml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..933e6b1 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,26 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: pip + directory: "/" + schedule: + interval: "daily" + commit-message: + prefix: "chore(deps): " + prefix-development: "chore(deps-dev): " + - package-ecosystem: pip + directory: "/.github/workflows" + schedule: + interval: daily + commit-message: + prefix: "ci: " + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "ci: " diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml index f57f891..e5236d8 100644 --- a/.github/workflows/ci_workflow.yml +++ b/.github/workflows/ci_workflow.yml @@ -58,5 +58,10 @@ jobs: continue-on-error: true env: access_token: ${{ secrets.access_token }} + api_version_1: ${{ secrets.api_version_1 }} + api_version_2: ${{ secrets.api_version_2 }} + api_version_3: ${{ secrets.api_version_3 }} + api_version_4: ${{ secrets.api_version_4 }} + auth_type: ${{ secrets.auth_type }} run: | poetry run pytest --capture=no diff --git a/.github/workflows/constraints.txt b/.github/workflows/constraints.txt new file mode 100644 index 0000000..e20ed80 --- /dev/null +++ b/.github/workflows/constraints.txt @@ -0,0 +1,5 @@ +nox==2023.4.22 +nox-poetry==1.0.2 +pip==23.1.2 +poetry==1.5.1 +poetry-dynamic-versioning==0.22.0 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000..41e4146 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,51 @@ +name: Publish with Dynamic Versioning + +on: + release: + types: [published] + +permissions: + contents: write + id-token: write + +jobs: + publish: + name: Publish to PyPI + runs-on: ubuntu-latest + environment: publishing + env: + PIP_CONSTRAINT: .github/workflows/constraints.txt + steps: + - name: Checkout code + uses: actions/checkout@v3.5.2 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v4.6.0 + with: + python-version: "3.10" + + - name: Upgrade pip + run: | + pip install pip + pip --version + - name: Install Poetry + run: | + pipx install poetry + pipx inject poetry poetry-dynamic-versioning[plugin] + poetry --version + poetry self show plugins + - name: Build + run: poetry build + + - name: Upload wheel to release + uses: svenstaro/upload-release-action@v2 + with: + file: 
dist/*.whl + tag: ${{ github.ref }} + overwrite: true + file_glob: true + + - name: Publish + uses: pypa/gh-action-pypi-publish@v1.8.6 diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot_sdk/client.py index f08fdfc..612775a 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot_sdk/client.py @@ -11,8 +11,6 @@ from singer_sdk.pagination import BaseAPIPaginator from singer_sdk.streams import RESTStream -from tap_hubspot_sdk.auth import tapHubspotAuthenticator - if sys.version_info >= (3, 8): from functools import cached_property else: diff --git a/tests/__init__.py b/tests/__init__.py index 01d0fad..f589613 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1 +1 @@ -"""Test suite for tap-hubspot-sdk.""" +"""Test suite for tap-linkedin-ads.""" diff --git a/tests/test_core.py b/tests/test_core.py index c1d2c4d..05b8788 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -1,22 +1,16 @@ """Tests standard tap features using the built-in SDK tests library.""" -import datetime +from singer_sdk.testing import SuiteConfig, get_tap_test_class -from singer_sdk.testing import get_tap_test_class - -from tap_hubspot_sdk.tap import Taptap-hubspot-sdk +from tap_hubspot_sdk.tap import TapHubspot SAMPLE_CONFIG = { - "start_date": datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d"), - # TODO: Initialize minimal tap config + "start_date": "2023-01-01T00:00:00Z", } -# Run standard built-in tap tests from the SDK: -TestTaptap-hubspot-sdk = get_tap_test_class( - tap_class=Taptap-hubspot-sdk, +TestTapHubspot = get_tap_test_class( + TapHubspot, config=SAMPLE_CONFIG, + suite_config=SuiteConfig(), ) - - -# TODO: Create additional tests as appropriate for your tap. From c3a35fc7b7871a3bced3c7452ffc8bb43f9b1551 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 14 Jun 2023 11:53:01 -0400 Subject: [PATCH 031/105] boolean column type changes --- tap_hubspot_sdk/streams.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot_sdk/streams.py index 4a6ba85..586340f 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot_sdk/streams.py @@ -424,7 +424,7 @@ class TicketPipelineStream(HubspotStream): Property("pipelineId", StringType), Property("createdAt", StringType), Property("updatedAt", StringType), - Property("default", StringType), + Property("default", BooleanType), ).to_dict() @property @@ -516,7 +516,7 @@ class DealPipelineStream(HubspotStream): Property("pipelineId", StringType), Property("createdAt", StringType), Property("updatedAt", StringType), - Property("default", StringType), + Property("default", BooleanType), ).to_dict() @property @@ -604,7 +604,7 @@ class EmailSubscriptionStream(HubspotStream): Property("name", StringType), Property("description", StringType), Property("active", BooleanType), - Property("internal", StringType), + Property("internal", BooleanType), Property("category", StringType), Property("channel", StringType), Property("internalName", StringType), From e1594b3ab41ec03fd11a681e44b353d948437a07 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 14 Jun 2023 13:58:33 -0400 Subject: [PATCH 032/105] Remove _sdk and -sdk suffix from connector / directory --- .github/workflows/ci_workflow.yml | 2 +- meltano.yml | 8 ++++---- pyproject.toml | 12 +++++------ tap_hubspot/__init__.py | 1 + {tap_hubspot_sdk => tap_hubspot}/client.py | 2 +- {tap_hubspot_sdk => tap_hubspot}/streams.py | 4 ++-- {tap_hubspot_sdk => tap_hubspot}/tap.py | 8 ++++---- tap_hubspot_sdk/__init__.py | 1 - tests/test_core.py | 2 
+- tox.ini | 22 ++++++++++----------- 10 files changed, 31 insertions(+), 31 deletions(-) create mode 100644 tap_hubspot/__init__.py rename {tap_hubspot_sdk => tap_hubspot}/client.py (98%) rename {tap_hubspot_sdk => tap_hubspot}/streams.py (99%) rename {tap_hubspot_sdk => tap_hubspot}/tap.py (89%) delete mode 100644 tap_hubspot_sdk/__init__.py diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml index e5236d8..a8b94f7 100644 --- a/.github/workflows/ci_workflow.yml +++ b/.github/workflows/ci_workflow.yml @@ -1,7 +1,7 @@ ### A CI workflow template that runs linting and python testing ### TODO: Modify as needed or as desired. -name: Test tap-hubspot-sdk +name: Test tap-hubspot on: [push] diff --git a/meltano.yml b/meltano.yml index ea7d213..77148b9 100644 --- a/meltano.yml +++ b/meltano.yml @@ -1,11 +1,11 @@ version: 1 send_anonymous_usage_stats: true -project_id: "tap-hubspot-sdk" +project_id: "tap-hubspot" default_environment: dev plugins: extractors: - - name: "tap-hubspot-sdk" - namespace: "tap_hubspot_sdk" + - name: "tap-hubspot" + namespace: "tap_hubspot" pip_url: -e . capabilities: - state @@ -39,7 +39,7 @@ plugins: variant: hotgluexyz pip_url: git+https://github.com/hotgluexyz/target-csv.git@0.3.3 config: - destination_path: /Users/neilgorman/Documents/GitHub/tap-linkedin-sdk/output + destination_path: /Users/neilgorman/Documents/GitHub/tap-hubspot/output - name: target-snowflake variant: transferwise pip_url: pipelinewise-target-snowflake diff --git a/pyproject.toml b/pyproject.toml index 9839d80..56128de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,12 +1,12 @@ [tool.poetry] -name = "tap-hubspot-sdk" +name = "tap-hubspot" version = "0.0.1" -description = "`tap-hubspot-sdk` is a Singer tap for tap-hubspot-sdk, built with the Meltano Singer SDK." +description = "`tap-hubspot` is a Singer tap for tap-hubspot, built with the Meltano Singer SDK." 
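# Note: this rename also updates [tool.ruff] src, [tool.ruff.isort]
# known-first-party, and the [tool.poetry.scripts] entry point further down,
# but the [tool.isort] src_paths key (added in an earlier commit as
# "tap_linkedin") is not touched by this diff, so it appears to still point at
# a package that never existed in this repo; if that reading is correct, the
# matching fix would presumably be:
#
#   [tool.isort]
#   src_paths = "tap_hubspot"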
readme = "README.md" authors = ["Ethan Stein"] keywords = [ "ELT", - "tap-hubspot-sdk", + "tap-hubspot", ] license = "Apache 2.0" @@ -41,14 +41,14 @@ ignore = [ "ANN102", # missing-type-cls ] select = ["ALL"] -src = ["tap_hubspot_sdk"] +src = ["tap_hubspot"] target-version = "py37" [tool.ruff.flake8-annotations] allow-star-arg-any = true [tool.ruff.isort] -known-first-party = ["tap_hubspot_sdk"] +known-first-party = ["tap_hubspot"] [tool.ruff.pydocstyle] convention = "google" @@ -59,4 +59,4 @@ build-backend = "poetry.core.masonry.api" [tool.poetry.scripts] # CLI declaration -tap-hubspot-sdk = 'tap_hubspot_sdk.tap:TapHubspot.cli' +tap-hubspot = 'tap_hubspot.tap:TapHubspot.cli' diff --git a/tap_hubspot/__init__.py b/tap_hubspot/__init__.py new file mode 100644 index 0000000..c296f12 --- /dev/null +++ b/tap_hubspot/__init__.py @@ -0,0 +1 @@ +"""Tap for tap-hubspot.""" diff --git a/tap_hubspot_sdk/client.py b/tap_hubspot/client.py similarity index 98% rename from tap_hubspot_sdk/client.py rename to tap_hubspot/client.py index 612775a..6be03d4 100644 --- a/tap_hubspot_sdk/client.py +++ b/tap_hubspot/client.py @@ -27,7 +27,7 @@ class HubspotStream(RESTStream): - """tap-hubspot-sdk stream class.""" + """tap-hubspot stream class.""" @property def url_base(self) -> str: diff --git a/tap_hubspot_sdk/streams.py b/tap_hubspot/streams.py similarity index 99% rename from tap_hubspot_sdk/streams.py rename to tap_hubspot/streams.py index 586340f..a45034e 100644 --- a/tap_hubspot_sdk/streams.py +++ b/tap_hubspot/streams.py @@ -1,4 +1,4 @@ -"""Stream type classes for tap-hubspot-sdk.""" +"""Stream type classes for tap-hubspot.""" from __future__ import annotations @@ -6,7 +6,7 @@ from singer_sdk import typing as th # JSON Schema typing helpers -from tap_hubspot_sdk.client import HubspotStream +from tap_hubspot.client import HubspotStream PropertiesList = th.PropertiesList Property = th.Property diff --git a/tap_hubspot_sdk/tap.py b/tap_hubspot/tap.py similarity index 89% rename from tap_hubspot_sdk/tap.py rename to tap_hubspot/tap.py index ae01ba0..d63fb3e 100644 --- a/tap_hubspot_sdk/tap.py +++ b/tap_hubspot/tap.py @@ -1,17 +1,17 @@ -"""tap-hubspot-sdk tap class.""" +"""tap-hubspot tap class.""" from __future__ import annotations from singer_sdk import Tap from singer_sdk import typing as th # JSON schema typing helpers -from tap_hubspot_sdk import streams +from tap_hubspot import streams class TapHubspot(Tap): - """tap-hubspot-sdk tap class.""" + """tap-hubspot tap class.""" - name = "tap-hubspot-sdk" + name = "tap-hubspot" config_jsonschema = th.PropertiesList( th.Property( diff --git a/tap_hubspot_sdk/__init__.py b/tap_hubspot_sdk/__init__.py deleted file mode 100644 index 0b804c2..0000000 --- a/tap_hubspot_sdk/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Tap for tap-hubspot-sdk.""" diff --git a/tests/test_core.py b/tests/test_core.py index 05b8788..da8c549 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -2,7 +2,7 @@ from singer_sdk.testing import SuiteConfig, get_tap_test_class -from tap_hubspot_sdk.tap import TapHubspot +from tap_hubspot.tap import TapHubspot SAMPLE_CONFIG = { "start_date": "2023-01-01T00:00:00Z", diff --git a/tox.ini b/tox.ini index 0e0f5d7..535fb29 100644 --- a/tox.ini +++ b/tox.ini @@ -11,10 +11,10 @@ whitelist_externals = poetry commands = poetry install -v - poetry run pytest - - poetry run black --check tap_hubspot_sdk/ - - poetry run flake8 tap_hubspot_sdk - - poetry run pydocstyle tap_hubspot_sdk - - poetry run mypy tap_hubspot_sdk --exclude='tap_hubspot_sdk/tests' 
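# A note on the literal leading dashes in these commands: in tox, prefixing a
# command with "-" tells tox to ignore that command's exit status, so black,
# flake8, pydocstyle, and mypy failures do not fail this env on their own. If
# hard-failing lint were intended, the same commands would be listed without
# the dash, e.g. a minimal sketch:
#
#   commands =
#       poetry install -v
#       poetry run black --check tap_hubspot/
#       poetry run mypy tap_hubspot --exclude='tap_hubspot/tests'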
+ - poetry run black --check tap_hubspot/ + - poetry run flake8 tap_hubspot + - poetry run pydocstyle tap_hubspot + - poetry run mypy tap_hubspot --exclude='tap_hubspot/tests' [testenv:pytest] # Run the python tests. @@ -29,20 +29,20 @@ commands = # To execute, run `tox -e format` commands = poetry install -v - - poetry run black tap_hubspot_sdk/ - - poetry run isort tap_hubspot_sdk + - poetry run black tap_hubspot/ + - poetry run isort tap_hubspot [testenv:lint] # Raise an error if lint and style standards are not met. # To execute, run `tox -e lint` commands = poetry install -v - - poetry run black --check --diff tap_hubspot_sdk/ - - poetry run isort --check tap_hubspot_sdk - - poetry run flake8 tap_hubspot_sdk - - poetry run pydocstyle tap_hubspot_sdk + - poetry run black --check --diff tap_hubspot/ + - poetry run isort --check tap_hubspot + - poetry run flake8 tap_hubspot + - poetry run pydocstyle tap_hubspot # refer to mypy.ini for specific settings - - poetry run mypy tap_hubspot_sdk --exclude='tap_hubspot_sdk/tests' + - poetry run mypy tap_hubspot --exclude='tap_hubspot/tests' [flake8] ignore = W503 From 938db2f97ae797f941274c0e19c09f0f075b92c6 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 14 Jun 2023 14:35:38 -0400 Subject: [PATCH 033/105] remove sdk suffix from readme --- README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 0cc1c41..f6468b3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# tap-hubspot-sdk +# tap-hubspot -`tap-hubspot-sdk` is a Singer tap for tap-hubspot-sdk. +`tap-hubspot` is a Singer tap for tap-hubspot. Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps. @@ -34,7 +34,7 @@ Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps. ### Meltano Variables The following config values need to be set in order to use with Meltano. These can be set in `meltano.yml`, via -```meltano config tap-hubspot-sdk set --interactive```, or via the env var mappings shown above. +```meltano config tap-hubspot set --interactive```, or via the env var mappings shown above. - `access_token:` access token from TAP_HUBSPOT_ACCESS_TOKEN variable - `start_date:` start date @@ -47,7 +47,7 @@ The following config values need to be set in order to use with Meltano. These c A full list of supported settings and capabilities for this tap is available by running: ```bash -tap-hubspot-sdk --about +tap-hubspot --about ``` ## Elastic License 2.0 @@ -57,7 +57,7 @@ The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensa ## Installation ```bash -pipx install git+https://github.com/ryan-miranda-partners/tap-hubspot-sdk.git +pipx install git+https://github.com/ryan-miranda-partners/tap-hubspot.git ``` ### Configure using environment variables @@ -72,7 +72,7 @@ A Hubspot access token is required to make API requests. (See [Hubspot API](http ## Usage -You can easily run `tap-hubspot-sdk` by itself or in a pipeline using [Meltano](https://meltano.com/). +You can easily run `tap-hubspot` by itself or in a pipeline using [Meltano](https://meltano.com/). ## Stream Inheritance @@ -81,9 +81,9 @@ This project uses parent-child streams. 
Learn more about them [here](https://git ### Executing the Tap Directly ```bash -tap-hubspot-sdk --version -tap-hubspot-sdk --help -tap-hubspot-sdk --config CONFIG --discover > ./catalog.json +tap-hubspot --version +tap-hubspot --help +tap-hubspot --config CONFIG --discover > ./catalog.json ``` ## Developer Resources @@ -106,10 +106,10 @@ Create tests within the `tests` subfolder and poetry run pytest ``` -You can also test the `tap-hubspot-sdk` CLI interface directly using `poetry run`: +You can also test the `tap-hubspot` CLI interface directly using `poetry run`: ```bash -poetry run tap-hubspot-sdk --help +poetry run tap-hubspot --help ``` ### Testing with [Meltano](https://www.meltano.com) @@ -126,7 +126,7 @@ Next, install Meltano (if you haven't already) and any needed plugins: # Install meltano pipx install meltano # Initialize meltano within this directory -cd tap-hubspot-sdk +cd tap-hubspot meltano install ``` @@ -134,9 +134,9 @@ Now you can test and orchestrate using Meltano: ```bash # Test invocation: -meltano invoke tap-hubspot-sdk --version +meltano invoke tap-hubspot --version # OR run a test `elt` pipeline: -meltano elt tap-hubspot-sdk target-jsonl +meltano elt tap-hubspot target-jsonl ``` ### SDK Dev Guide From 9c657d866b0fb8b2a21169edd9924120444c0e53 Mon Sep 17 00:00:00 2001 From: Lucas Thorne Date: Wed, 21 Jun 2023 15:44:12 -0400 Subject: [PATCH 034/105] Update actions in ci_workflow --- .github/workflows/ci_workflow.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml index a8b94f7..9ca97f6 100644 --- a/.github/workflows/ci_workflow.yml +++ b/.github/workflows/ci_workflow.yml @@ -15,9 +15,9 @@ jobs: python-version: [3.9] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install Poetry @@ -41,9 +41,9 @@ jobs: python-version: [3.9] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install Poetry From 6a2a63e3039402e7273598aff23f435c8d59883a Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 21 Jun 2023 16:01:03 -0400 Subject: [PATCH 035/105] change test suite name to hubspot --- tests/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index f589613..913e018 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1 +1 @@ -"""Test suite for tap-linkedin-ads.""" +"""Test suite for tap-hubspot.""" From 1e8c7020eebac5a332ad3c22624e1d664468f2c2 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 21 Jun 2023 16:14:21 -0400 Subject: [PATCH 036/105] remove unused auth types --- tap_hubspot/client.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tap_hubspot/client.py b/tap_hubspot/client.py index 6be03d4..84625db 100644 --- a/tap_hubspot/client.py +++ b/tap_hubspot/client.py @@ -59,22 +59,6 @@ def authenticator(self) -> _Auth: token=access_token, ) - elif auth_type == "simple": - return SimpleAuthenticator( - self, - auth_headers={ - "Authorization": "Bearer {}".format(access_token), - }, - ) - - elif auth_type == "api": - APIAuthenticatorBase.auth_headers = { - "Authorization": "Bearer {}".format(access_token), - } - 
return APIAuthenticatorBase( - self, - ) - @property def http_headers(self) -> dict: """Return the http headers needed. From 5a0274278ce494b9f906f86110b211ad0f228f8c Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 21 Jun 2023 17:03:25 -0400 Subject: [PATCH 037/105] Remove API version settings / hardcode for all streams --- .github/workflows/ci_workflow.yml | 4 - README.md | 8 - meltano.yml | 8 - tap_hubspot/streams.py | 267 ++++++++++-------------------- 4 files changed, 89 insertions(+), 198 deletions(-) diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml index 9ca97f6..962b7cc 100644 --- a/.github/workflows/ci_workflow.yml +++ b/.github/workflows/ci_workflow.yml @@ -58,10 +58,6 @@ jobs: continue-on-error: true env: access_token: ${{ secrets.access_token }} - api_version_1: ${{ secrets.api_version_1 }} - api_version_2: ${{ secrets.api_version_2 }} - api_version_3: ${{ secrets.api_version_3 }} - api_version_4: ${{ secrets.api_version_4 }} auth_type: ${{ secrets.auth_type }} run: | poetry run pytest --capture=no diff --git a/README.md b/README.md index f6468b3..0fb665c 100644 --- a/README.md +++ b/README.md @@ -20,10 +20,6 @@ Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps. | Setting | Required | Default | Description | |:--------------------|:--------:|:-------:|:------------| | access_token | True | None | The token to authenticate against the API service | -| api_version_1 | True | v1.0 | The API version to request data from. | -| api_version_2 | True | v2.0 | The API version to request data from. | -| api_version_3 | True | v3.0 | The API version to request data from. | -| api_version_4 | True | v4.0 | The API version to request data from. | | start_date | False | None | The earliest record date to sync | | end_date | False | None | The latest record date to sync | | stream_maps | False | None | Config object for stream maps capability. For more information check out [Stream Maps](https://sdk.meltano.com/en/latest/stream_maps.html). | @@ -39,10 +35,6 @@ The following config values need to be set in order to use with Meltano. 
These c - `access_token:` access token from TAP_HUBSPOT_ACCESS_TOKEN variable - `start_date:` start date - `end_date:` end_date -- `api_version_1:` api version -- `api_version_2:` api version -- `api_version_3:` api version -- `api_version_4:` api version A full list of supported settings and capabilities for this tap is available by running: diff --git a/meltano.yml b/meltano.yml index 77148b9..aaa65a9 100644 --- a/meltano.yml +++ b/meltano.yml @@ -21,14 +21,6 @@ plugins: value: '2023-01-01T00:00:00Z' - name: end_date value: '2023-05-22T00:00:00Z' - - name: api_version_1 - value: 'v1' - - name: api_version_2 - value: 'v2' - - name: api_version_3 - value: 'v3' - - name: api_version_4 - value: 'v4' - name: auth_type value: 'oauth' loaders: diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index a45034e..f36067a 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -113,8 +113,7 @@ def url_base(self) -> str: """ Returns an updated path which has the api version """ - version = self.config.get("api_version_1", "") - base_url = "https://api.hubapi.com/contacts/{}".format(version) + base_url = "https://api.hubapi.com/contacts/v1" return base_url def get_url_params( @@ -250,8 +249,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/settings/{}".format(version) + base_url = "https://api.hubapi.com/settings/v3" return base_url def get_url_params( @@ -340,8 +338,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def get_url_params( @@ -432,8 +429,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_1", "") - base_url = "https://api.hubapi.com/crm-pipelines/{}".format(version) + base_url = "https://api.hubapi.com/crm-pipelines/v1" return base_url def get_url_params( @@ -524,8 +520,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_1", "") - base_url = "https://api.hubapi.com/crm-pipelines/{}".format(version) + base_url = "https://api.hubapi.com/crm-pipelines/v1" return base_url def get_url_params( @@ -616,8 +611,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_1", "") - base_url = "https://api.hubapi.com/email/public/{}".format(version) + base_url = "https://api.hubapi.com/email/public/v1" return base_url def get_url_params( @@ -717,8 +711,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def get_url_params( @@ -828,8 +821,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -918,8 +910,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = 
"https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1008,8 +999,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1098,8 +1088,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1188,8 +1177,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1278,8 +1266,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1368,8 +1355,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1458,8 +1444,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1548,8 +1533,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1638,8 +1622,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1728,8 +1711,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1818,8 +1800,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" 
return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1931,8 +1912,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -1987,8 +1967,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2035,8 +2014,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2083,8 +2061,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2131,8 +2108,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2179,8 +2155,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2227,8 +2202,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2275,8 +2249,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2323,8 +2296,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2371,8 +2343,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: 
dict, context: dict | None = None) -> dict | None: @@ -2419,8 +2390,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2467,8 +2437,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2515,8 +2484,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2563,8 +2531,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2611,8 +2578,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2659,8 +2625,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2707,8 +2672,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2755,8 +2719,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2803,8 +2766,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2851,8 +2813,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) 
-> dict | None: @@ -2899,8 +2860,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2947,8 +2907,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -2995,8 +2954,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3043,8 +3001,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3091,8 +3048,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3139,8 +3095,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3187,8 +3142,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3235,8 +3189,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3283,8 +3236,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3331,8 +3283,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3379,8 +3330,7 
@@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3427,8 +3377,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3475,8 +3424,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3523,8 +3471,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3571,8 +3518,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3619,8 +3565,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3667,8 +3612,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3715,8 +3659,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3763,8 +3706,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3811,8 +3753,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3859,8 +3800,7 @@ def url_base(self) -> str: """ 
Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3907,8 +3847,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -3955,8 +3894,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4003,8 +3941,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4051,8 +3988,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4099,8 +4035,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4147,8 +4082,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4195,8 +4129,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4239,8 +4172,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4287,8 +4219,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4335,8 +4266,7 @@ def url_base(self) -> str: """ Returns an updated which has the 
api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4383,8 +4313,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4431,8 +4360,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4479,8 +4407,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4527,8 +4454,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4575,8 +4501,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4623,8 +4548,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4671,8 +4595,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4719,8 +4642,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4767,8 +4689,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4815,8 +4736,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = 
self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4863,8 +4783,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4911,8 +4830,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -4959,8 +4877,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -5007,8 +4924,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -5055,8 +4971,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -5103,8 +5018,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -5151,8 +5065,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -5199,8 +5112,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_3", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v3" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: @@ -5247,8 +5159,7 @@ def url_base(self) -> str: """ Returns an updated which has the api version """ - version = self.config.get("api_version_4", "") - base_url = "https://api.hubapi.com/crm/{}".format(version) + base_url = "https://api.hubapi.com/crm/v4" return base_url def post_process(self, row: dict, context: dict | None = None) -> dict | None: From 082ca5997727c056f0afe9ba1c7e55b2a80ab102 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 21 Jun 2023 17:05:50 -0400 Subject: [PATCH 
038/105] Remove oauth if statement --- tap_hubspot/client.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/tap_hubspot/client.py b/tap_hubspot/client.py index 84625db..1275e0f 100644 --- a/tap_hubspot/client.py +++ b/tap_hubspot/client.py @@ -18,8 +18,6 @@ from singer_sdk.authenticators import ( BearerTokenAuthenticator, - SimpleAuthenticator, - APIAuthenticatorBase, ) _Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] @@ -51,13 +49,11 @@ def authenticator(self) -> _Auth: """ access_token = self.config.get("access_token") - auth_type = self.config.get("auth_type") - if auth_type == "oauth": - return BearerTokenAuthenticator.create_for_stream( - self, - token=access_token, - ) + return BearerTokenAuthenticator.create_for_stream( + self, + token=access_token, + ) @property def http_headers(self) -> dict: From 1be22d30febdcc9a0ccd3068e842fb56492a987c Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 22 Jun 2023 12:36:50 -0400 Subject: [PATCH 039/105] Comment out Association_type, remove ci_workflow TODO --- .github/workflows/ci_workflow.yml | 1 - tap_hubspot/tap.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml index 962b7cc..444deb2 100644 --- a/.github/workflows/ci_workflow.yml +++ b/.github/workflows/ci_workflow.yml @@ -1,5 +1,4 @@ ### A CI workflow template that runs linting and python testing -### TODO: Modify as needed or as desired. name: Test tap-hubspot diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index d63fb3e..810ad9d 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -40,7 +40,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.DealPipelineStream(self), streams.EmailSubscriptionStream(self), streams.PropertyNotesStream(self), - streams.AssociationPostalMailDealLabelStream(self), + #streams.AssociationPostalMailDealLabelStream(self), ] From f2a3c2c5aea4568fe4173557031157554d8cae4b Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 22 Jun 2023 14:37:45 -0400 Subject: [PATCH 040/105] Remove Association stream --- tap_hubspot/streams.py | 3845 ---------------------------------------- tap_hubspot/tap.py | 1 - 2 files changed, 3846 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index f36067a..32e19b1 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1871,3848 +1871,3 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: return property_records - -class AssociationContactCompanyTypeStream(HubspotStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "contactcompanytype" - path = "/associations/contact/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - schema = PropertiesList( - Property("id", IntegerType), - Property("name", StringType), - Property("from_object_type", StringType), - Property("to_object_type", StringType), - Property("category", StringType), - Property("typeId", IntegerType), - Property("label", StringType), - ).to_dict() - - @property - def url_base(self) -> str: - """ - 
Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - - -class AssociationContactCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "contactcompanylabel" - path = "/associations/contact/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "contact" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationDealContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "dealcontacttype" - path = "/associations/deal/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "deal" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationDealContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "dealcontactlabel" - path = "/associations/deal/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = 
"incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "deal" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationDealCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "dealcompanytype" - path = "/associations/deal/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "deal" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationDealCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "dealcompanylabel" - path = "/associations/deal/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "deal" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketcontacttype" - path = "/associations/ticket/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return 
base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketcontactlabel" - path = "/associations/ticket/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketcompanytype" - path = "/associations/ticket/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketcompanylabel" - path = "/associations/ticket/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with 
added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketdealtype" - path = "/associations/ticket/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketdeallabel" - path = "/associations/ticket/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketCommunicationTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketcommunicationtype" - path = "/associations/ticket/communication/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "communication" - except: - pass - - return 
super().post_process(row, context) - - -class AssociationTicketCommunicationLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketcommunicationlabel" - path = "/associations/ticket/communication/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "communication" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketCallTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketcalltype" - path = "/associations/ticket/call/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "call" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketCallLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketcalllabel" - path = "/associations/ticket/call/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "call" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketMeetingTypeStream(AssociationContactCompanyTypeStream): - - """ 
- https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketmeetingtype" - path = "/associations/ticket/meeting/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "meeting" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketMeetingLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketmeetinglabel" - path = "/associations/ticket/meeting/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "meeting" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketNoteTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketnotetype" - path = "/associations/ticket/note/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "note" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketNoteLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: 
stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketnotelabel" - path = "/associations/ticket/note/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "note" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketTaskTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "tickettasktype" - path = "/associations/ticket/task/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "task" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketTaskLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "tickettasklabel" - path = "/associations/ticket/task/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "task" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketEmailTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = 
datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketemailtype" - path = "/associations/ticket/email/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "email" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketEmailLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketemaillabel" - path = "/associations/ticket/email/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "email" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketPostalMailTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "ticketpostalmailtype" - path = "/associations/ticket/postal_mail/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "postal_mail" - except: - pass - - return super().post_process(row, context) - - -class AssociationTicketPostalMailLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "ticketpostalmaillabel" - path = 
"/associations/ticket/postal_mail/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "ticket" - row["to_object_type"] = "postal_mail" - except: - pass - - return super().post_process(row, context) - - -class AssociationLineItemDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "lineitemdealtype" - path = "/associations/line_item/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "line_item" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationLineItemDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "lineitemdeallabel" - path = "/associations/line_item/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "line_item" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationCommunicationContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "communicationcontacttype" - path = "/associations/communication/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - 
replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "communication" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationCommunicationContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "communicationcontactlabel" - path = "/associations/communication/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "communication" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationCommunicationCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "communicationcompanytype" - path = "/associations/communication/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "communication" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationCommunicationCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "communicationcompanylabel" - path = "/associations/communication/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - 
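# NOTE: the Type/Label stream subclasses in this module differ only in the
# association pair, the v3/v4 API version, and the column set. A sketch of
# collapsing that boilerplate into a small class factory — assuming only the
# AssociationContactCompanyTypeStream base used throughout this patch; the
# factory itself is illustrative and not part of the original code:
def make_association_stream(from_obj: str, to_obj: str, kind: str) -> type:
    """Build a "types" (v3, id/name) or "labels" (v4, category/typeId/label) stream class."""
    is_label = kind == "labels"
    columns = "category, typeId, label" if is_label else "id, name"
    attrs = {
        "columns": columns,
        "name": "{}{}{}".format(from_obj, to_obj, "label" if is_label else "type").replace("_", ""),
        "path": "/associations/{}/{}/{}?fields={}".format(from_obj, to_obj, kind, columns),
        "primary_keys": ["typeId"] if is_label else ["id"],
        "replication_key": "typeId" if is_label else "id",
        "replication_method": "incremental",
        # label endpoints live on CRM v4, type endpoints on CRM v3
        "url_base": property(lambda self: "https://api.hubapi.com/crm/" + ("v4" if is_label else "v3")),
    }
    # post_process stamping of from/to object types is omitted for brevity
    class_name = "Association{}{}Stream".format(from_obj.title(), to_obj.title()).replace("_", "")
    return type(class_name, (AssociationContactCompanyTypeStream,), attrs)
# e.g. make_association_stream("ticket", "meeting", "labels") would reproduce
# the ticketmeetinglabel stream defined above, minus the whitespace padding
# discussed further below.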
@property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "communication" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationCommunicationDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "communicationdealtype" - path = "/associations/communication/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "communication" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationCommunicationDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "communicationdeallabel" - path = "/associations/communication/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "communication" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationCallContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "callcontacttype" - path = "/associations/call/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = 
"https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "call" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationCallContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "callcontactlabel" - path = "/associations/call/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "call" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationCallCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "callcompanytype" - path = "/associations/call/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "call" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationCallCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "callcompanylabel" - path = "/associations/call/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns 
api records with added columns - """ - - try: - row["from_object_type"] = "call" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationCallDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "calldealtype" - path = "/associations/call/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "call" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationCallDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "calldeallabel" - path = "/associations/call/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "call" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationEmailContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "emailcontacttype" - path = "/associations/email/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "email" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class 
AssociationEmailContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "emailcontactlabel" - path = "/associations/email/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "email" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationEmailCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "emailcompanytype" - path = "/associations/email/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "email" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationEmailCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "emailcompanylabel" - path = "/associations/email/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "email" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationEmailDealTypeStream(AssociationContactCompanyTypeStream): - - """ - 
https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "emaildealtype" - path = "/associations/email/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "email" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationEmailDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "emaildeallabel" - path = "/associations/email/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "email" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationMeetingContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "meetingcontacttype" - path = "/associations/meeting/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "meeting" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationMeetingContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream 
name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "meetingcontactlabel" - path = "/associations/meeting/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - try: - row["from_object_type"] = "meeting" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationMeetingCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "meetingcompanytype" - path = "/associations/meeting/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "meeting" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationMeetingCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "meetingcompanylabel" - path = "/associations/meeting/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "meeting" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationMeetingDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for 
replication - """ - - columns = """ - id, name - """ - - name = "meetingdealtype" - path = "/associations/meeting/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "meeting" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationMeetingDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "meetingdeallabel" - path = "/associations/meeting/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "meeting" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationNoteContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "notecontacttype" - path = "/associations/note/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "note" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationNoteContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "notecontactlabel" - path = "/associations/note/contact/labels?fields={}".format(columns) - 
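# NOTE: `columns` is a triple-quoted string, so the `format(...)` call above
# embeds its newlines and leading indentation directly into the query string.
# If the endpoint is strict about the `fields` value, stripping the padding
# first is the minimal fix (illustrative, not part of the original code):
#
#     path = "/associations/note/contact/labels?fields={}".format(columns.strip())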
primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "note" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationNoteCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "notecompanytype" - path = "/associations/note/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "note" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationNoteCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "notecompanylabel" - path = "/associations/note/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "note" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationNoteDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "notedealtype" - path = "/associations/note/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version
- """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "note" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationNoteDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "notedeallabel" - path = "/associations/note/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "note" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationTaskContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "taskcontacttype" - path = "/associations/task/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "task" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationTaskContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "taskcontactlabel" - path = "/associations/task/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - -
def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "task" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationTaskCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "taskcompanytype" - path = "/associations/task/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "task" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationTaskCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "taskcompanylabel" - path = "/associations/task/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "task" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationTaskDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "taskdealtype" - path = "/associations/task/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "task" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class 
AssociationTaskDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "taskdeallabel" - path = "/associations/task/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "task" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationPostalMailContactTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "postalmailcontacttype" - path = "/associations/postal_mail/contact/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "postal_mail" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationPostalMailContactLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "postalmailcontactlabel" - path = "/associations/postal_mail/contact/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "postal_mail" - row["to_object_type"] = "contact" - except: - pass - - return super().post_process(row, context) - - -class AssociationPostalMailCompanyTypeStream(AssociationContactCompanyTypeStream): - - """ - 
https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "postalmailcompanytype" - path = "/associations/postal_mail/company/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "postal_mail" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationPostalMailCompanyLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "postalmailcompanylabel" - path = "/associations/postal_mail/company/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "postal_mail" - row["to_object_type"] = "company" - except: - pass - - return super().post_process(row, context) - - -class AssociationPostalMailDealTypeStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - id, name - """ - - name = "postalmaildealtype" - path = "/associations/postal_mail/deal/types?fields={}".format(columns) - primary_keys = ["id"] - replication_key = "id" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "postal_mail" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - -class AssociationPostalMailDealLabelStream(AssociationContactCompanyTypeStream): - - """ - https://legacydocs.hubspot.com/docs/methods/crm-associations/get-associations - """ - - """ - columns: columns 
which will be added to fields parameter in api - name: stream name - path: path which will be added to api url in client.py - schema: instream schema - primary_keys = primary keys for the table - replication_key = datetime keys for replication - """ - - columns = """ - category, typeId, label - """ - - name = "association_type" - path = "/associations/postal_mail/deal/labels?fields={}".format(columns) - primary_keys = ["typeId"] - replication_key = "typeId" - replication_method = "incremental" - - @property - def url_base(self) -> str: - """ - Returns an updated which has the api version - """ - base_url = "https://api.hubapi.com/crm/v4" - return base_url - - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["from_object_type"] = "postal_mail" - row["to_object_type"] = "deal" - except: - pass - - return super().post_process(row, context) - - def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: - """ - We have type and label api for id and name column and type and label api for category, typeId, and label columns - We can get data from these api and merge these columns from type and label api with merge_dicts function, we can add the records from merge_dicts function to get the output - """ - - contact_company_type = AssociationContactCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - contact_company_label = AssociationContactCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - deal_contact_type = AssociationDealContactTypeStream( - self._tap, schema={"properties": {}} - ) - deal_contact_label = AssociationDealContactLabelStream( - self._tap, schema={"properties": {}} - ) - deal_company_type = AssociationDealCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - deal_company_label = AssociationDealCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_contact_type = AssociationTicketContactTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_contact_label = AssociationTicketContactLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_company_type = AssociationTicketCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_company_label = AssociationTicketCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_deal_type = AssociationTicketDealTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_deal_label = AssociationTicketDealLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_communication_type = AssociationTicketCommunicationTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_communication_label = AssociationTicketCommunicationLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_call_type = AssociationTicketCallTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_call_label = AssociationTicketCallLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_meeting_type = AssociationTicketMeetingTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_meeting_label = AssociationTicketMeetingLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_note_type = AssociationTicketNoteTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_note_label = AssociationTicketNoteLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_task_type = AssociationTicketTaskTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_task_label = AssociationTicketTaskLabelStream( - self._tap, 
schema={"properties": {}} - ) - ticket_email_type = AssociationTicketEmailTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_email_label = AssociationTicketEmailLabelStream( - self._tap, schema={"properties": {}} - ) - ticket_postal_type = AssociationTicketPostalMailTypeStream( - self._tap, schema={"properties": {}} - ) - ticket_postal_label = AssociationTicketPostalMailLabelStream( - self._tap, schema={"properties": {}} - ) - line_deal_type = AssociationLineItemDealTypeStream( - self._tap, schema={"properties": {}} - ) - line_deal_label = AssociationLineItemDealLabelStream( - self._tap, schema={"properties": {}} - ) - communication_contact_type = AssociationCommunicationContactTypeStream( - self._tap, schema={"properties": {}} - ) - communication_contact_label = AssociationCommunicationContactLabelStream( - self._tap, schema={"properties": {}} - ) - communication_company_type = AssociationCommunicationCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - communication_company_label = AssociationCommunicationCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - communication_deal_type = AssociationCommunicationDealTypeStream( - self._tap, schema={"properties": {}} - ) - communication_deal_label = AsociationCommunicationDealLabelStream( - self._tap, schema={"properties": {}} - ) - call_contact_type = AssociationCallContactTypeStream( - self._tap, schema={"properties": {}} - ) - call_contact_label = AssociationCallContactLabelStream( - self._tap, schema={"properties": {}} - ) - call_company_type = AssociationCallCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - call_company_label = AssociationCallCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - call_deal_type = AssociationCallDealTypeStream( - self._tap, schema={"properties": {}} - ) - call_deal_label = AssociationCallDealLabelStream( - self._tap, schema={"properties": {}} - ) - email_contact_type = AssociationEmailContactTypeStream( - self._tap, schema={"properties": {}} - ) - email_contact_label = AssociationEmailContactLabelStream( - self._tap, schema={"properties": {}} - ) - email_company_type = AssociationEmailCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - email_company_label = AssociationEmailCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - email_deal_type = AssociationEmailDealTypeStream( - self._tap, schema={"properties": {}} - ) - email_deal_label = AssociationEmailDealLabelStream( - self._tap, schema={"properties": {}} - ) - meeting_contact_type = AssociationMeetingContactTypeStream( - self._tap, schema={"properties": {}} - ) - meeting_contact_label = AssociationMeetingContactLabelStream( - self._tap, schema={"properties": {}} - ) - meeting_company_type = AssociationMeetingCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - meeting_company_label = AssociationMeetingCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - meeting_deal_type = AssociationMeetingDealTypeStream( - self._tap, schema={"properties": {}} - ) - meeting_deal_label = AssociationMeetingDealLabelStream( - self._tap, schema={"properties": {}} - ) - note_contact_type = AssociationNoteContactTypeStream( - self._tap, schema={"properties": {}} - ) - note_contact_label = AssociationNoteContactLabelStream( - self._tap, schema={"properties": {}} - ) - note_company_type = AssociationNoteCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - note_company_label = AssociationNoteCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - 
note_deal_type = AssoxationNoteDealTypeStream( - self._tap, schema={"properties": {}} - ) - note_deal_label = AssociationNoteDealLabel(self._tap, schema={"properties": {}}) - task_contact_type = AssociationTaskContactTypeStream( - self._tap, schema={"properties": {}} - ) - task_contact_label = AssociationTaskContactLabelStream( - self._tap, schema={"properties": {}} - ) - task_company_type = AssociationTaskCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - task_company_label = AssociationTaskCompanyLabelstream( - self._tap, schema={"properties": {}} - ) - task_deal_type = AssociationTaskDealTypeStream( - self._tap, schema={"properties": {}} - ) - task_deal_label = AssociationTaskDealLabelStream( - self._tap, schema={"properties": {}} - ) - postal_contact_type = AssociationPostalMailContactTypeStream( - self._tap, schema={"properties": {}} - ) - postal_contact_label = AssociationPostalMailContactLabelStream( - self._tap, schema={"properties": {}} - ) - postal_company_type = AssociationPostalMailCompanyTypeStream( - self._tap, schema={"properties": {}} - ) - postal_company_label = AssociationPostalMailCompanyLabelStream( - self._tap, schema={"properties": {}} - ) - postal_deal_type = AssociationPostalMailDealTypeStream( - self._tap, schema={"properties": {}} - ) - - contact_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(contact_company_type.get_records(context)), - list(contact_company_label.get_records(context)), - ) - ] - - deal_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(deal_contact_type.get_records(context)), - list(deal_contact_label.get_records(context)), - ) - ] - - deal_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(deal_company_type.get_records(context)), - list(deal_company_label.get_records(context)), - ) - ] - - ticket_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_contact_type.get_records(context)), - list(ticket_contact_label.get_records(context)), - ) - ] - - ticket_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_company_type.get_records(context)), - list(ticket_company_label.get_records(context)), - ) - ] - - ticket_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_deal_type.get_records(context)), - list(ticket_deal_label.get_records(context)), - ) - ] - - ticket_communication_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_communication_type.get_records(context)), - list(ticket_communication_label.get_records(context)), - ) - ] - - ticket_call_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_call_type.get_records(context)), - list(ticket_call_label.get_records(context)), - ) - ] - - ticket_meeting_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_meeting_type.get_records(context)), - list(ticket_meeting_label.get_records(context)), - ) - ] - - ticket_note_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_note_type.get_records(context)), - list(ticket_note_label.get_records(context)), - ) - ] - - ticket_task_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_task_type.get_records(context)), - list(ticket_task_label.get_records(context)), - ) - ] - - ticket_email_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(ticket_email_type.get_records(context)), - list(ticket_email_label.get_records(context)), - ) - ] - - ticket_postal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - 
list(ticket_postal_type.get_records(context)), - list(ticket_postal_label.get_records(context)), - ) - ] - - line_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(line_deal_type.get_records(context)), - list(line_deal_label.get_records(context)), - ) - ] - - communication_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(communication_contact_type.get_records(context)), - list(communication_contact_label.get_records(context)), - ) - ] - - communication_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(communication_company_type.get_records(context)), - list(communication_company_label.get_records(context)), - ) - ] - - communication_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(communication_deal_type.get_records(context)), - list(communication_deal_label.get_records(context)), - ) - ] - - call_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(call_contact_type.get_records(context)), - list(call_contact_label.get_records(context)), - ) - ] - - call_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(call_company_type.get_records(context)), - list(call_company_label.get_records(context)), - ) - ] - - call_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(call_deal_type.get_records(context)), - list(call_deal_label.get_records(context)), - ) - ] - - email_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(email_contact_type.get_records(context)), - list(email_contact_label.get_records(context)), - ) - ] - - email_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(email_company_type.get_records(context)), - list(email_company_label.get_records(context)), - ) - ] - - email_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(email_deal_type.get_records(context)), - list(email_deal_label.get_records(context)), - ) - ] - - meeting_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(meeting_contact_type.get_records(context)), - list(meeting_contact_label.get_records(context)), - ) - ] - - meeting_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(meeting_company_type.get_records(context)), - list(meeting_company_label.get_records(context)), - ) - ] - - meeting_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(meeting_deal_type.get_records(context)), - list(meeting_deal_label.get_records(context)), - ) - ] - - note_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(note_contact_type.get_records(context)), - list(note_contact_label.get_records(context)), - ) - ] - - note_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(note_company_type.get_records(context)), - list(note_company_label.get_records(context)), - ) - ] - - note_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(note_deal_type.get_records(context)), - list(note_deal_label.get_records(context)), - ) - ] - - task_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(task_contact_type.get_records(context)), - list(task_contact_label.get_records(context)), - ) - ] - - task_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(task_company_type.get_records(context)), - list(task_company_label.get_records(context)), - ) - ] - - task_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(task_deal_type.get_records(context)), - list(task_deal_label.get_records(context)), - ) - ] - - 
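#         (Caveat for the positional zip() pairing used in these
#         comprehensions: zip() stops at the shorter list, so a type
#         record with no label counterpart is dropped silently. A sketch
#         that would keep unmatched records instead:
#             from itertools import zip_longest
#             merged = [{**(t or {}), **(l or {})}
#                       for t, l in zip_longest(type_recs, label_recs)]
#         where type_recs/label_recs are the two materialised lists.)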
postal_contact_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(postal_contact_type.get_records(context)), - list(postal_contact_label.get_records(context)), - ) - ] - - postal_company_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(postal_company_type.get_records(context)), - list(postal_company_label.get_records(context)), - ) - ] - - postal_deal_records = [ - self.merge_dicts(x, y) - for x, y in zip( - list(postal_deal_type.get_records(context)), - list(super().get_records(context)), - ) - ] - - association_records = ( - contact_company_records - + deal_contact_records - + deal_company_records - + ticket_contact_records - + ticket_company_records - + ticket_deal_records - + ticket_communication_records - + ticket_call_records - + ticket_meeting_records - + ticket_note_records - + ticket_task_records - + ticket_email_records - + ticket_postal_records - + line_deal_records - + communication_contact_records - + communication_company_records - + communication_deal_records - + call_contact_records - + call_company_records - + call_deal_records - + email_contact_records - + email_company_records - + email_deal_records - + meeting_contact_records - + meeting_company_records - + meeting_deal_records - + note_contact_records - + note_company_records - + note_deal_records - + task_contact_records - + task_company_records - + task_deal_records - + postal_contact_records - + postal_company_records - + postal_deal_records - ) - - return association_records - - def merge_dicts(self, *dict_args): - """ - Given any number of dictionaries, shallow copy and merge into a new dict, - precedence goes to key-value pairs in latter dictionaries. - """ - result = {} - for dictionary in dict_args: - result.update(dictionary) - return result diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 810ad9d..77d6849 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -40,7 +40,6 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.DealPipelineStream(self), streams.EmailSubscriptionStream(self), streams.PropertyNotesStream(self), - #streams.AssociationPostalMailDealLabelStream(self), ] From 486a9c6cb746d57489a53f36f11c120de75f0297 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 22 Jun 2023 14:39:13 -0400 Subject: [PATCH 041/105] update poetry install in ci_workflow --- .github/workflows/ci_workflow.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml index 444deb2..0b2752a 100644 --- a/.github/workflows/ci_workflow.yml +++ b/.github/workflows/ci_workflow.yml @@ -20,9 +20,8 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Install Poetry - uses: snok/install-poetry@v1 - with: - version: 1.1.8 + run: | + pip install poetry - name: Install dependencies run: | poetry install @@ -46,9 +45,8 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Install Poetry - uses: snok/install-poetry@v1 - with: - version: 1.1.11 + run: | + pip install poetry - name: Install dependencies run: | poetry install From ecb0563fbd51ff3a64510be11c15db405427170f Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 22 Jun 2023 14:41:39 -0400 Subject: [PATCH 042/105] Remove extra line (linting) --- tap_hubspot/streams.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 32e19b1..f227671 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1870,4 +1870,3 @@ def get_records(self, 
context: dict | None) -> Iterable[dict[str, Any]]:
         )
 
         return property_records
-

From db933cca2db392e6421128d97fccc3a3a3d6b5ec Mon Sep 17 00:00:00 2001
From: NeilGorman104
Date: Thu, 22 Jun 2023 15:03:24 -0400
Subject: [PATCH 043/105] Remove auth_type from ci_workflow

---
 .github/workflows/ci_workflow.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml
index 0b2752a..efba581 100644
--- a/.github/workflows/ci_workflow.yml
+++ b/.github/workflows/ci_workflow.yml
@@ -55,6 +55,5 @@ jobs:
       continue-on-error: true
       env:
         access_token: ${{ secrets.access_token }}
-        auth_type: ${{ secrets.auth_type }}
       run: |
         poetry run pytest --capture=no

From b096a7b2aa28418ee92753cb0fa835fbccc7eab1 Mon Sep 17 00:00:00 2001
From: NeilGorman104
Date: Thu, 22 Jun 2023 15:05:53 -0400
Subject: [PATCH 044/105] remove continue-on-error in workflow

---
 .github/workflows/ci_workflow.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml
index efba581..2aae7e0 100644
--- a/.github/workflows/ci_workflow.yml
+++ b/.github/workflows/ci_workflow.yml
@@ -52,7 +52,7 @@ jobs:
         poetry install
     - name: Test with pytest
       id: test_pytest
-      continue-on-error: true
+      continue-on-error: false
       env:
         access_token: ${{ secrets.access_token }}
       run: |
         poetry run pytest --capture=no

From 30c62f425e4b979ea41159b0809a81be17f69da6 Mon Sep 17 00:00:00 2001
From: NeilGorman104
Date: Thu, 22 Jun 2023 16:40:54 -0400
Subject: [PATCH 045/105] Access_token env var in ci_workflow

---
 .github/workflows/ci_workflow.yml | 2 +-
 tap_hubspot/tap.py                | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml
index 2aae7e0..174b432 100644
--- a/.github/workflows/ci_workflow.yml
+++ b/.github/workflows/ci_workflow.yml
@@ -54,6 +54,6 @@ jobs:
       id: test_pytest
       continue-on-error: false
       env:
-        access_token: ${{ secrets.access_token }}
+        TAP_HUBSPOT_ACCESS_TOKEN: ${{ secrets.access_token }}
       run: |
         poetry run pytest --capture=no

diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py
index 77d6849..eba5083 100644
--- a/tap_hubspot/tap.py
+++ b/tap_hubspot/tap.py
@@ -17,6 +17,7 @@ class TapHubspot(Tap):
         th.Property(
             "access_token",
             th.StringType,
+            required=True,
             description="The token to authenticate against the API service",
         ),
         th.Property(

From 119d92a1b4df6cfb3350d548706c8910ad8e3ece Mon Sep 17 00:00:00 2001
From: NeilGorman104
Date: Fri, 23 Jun 2023 13:03:34 -0400
Subject: [PATCH 046/105] resolve linting test

---
 .github/workflows/ci_workflow.yml | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml
index 174b432..d5f30f6 100644
--- a/.github/workflows/ci_workflow.yml
+++ b/.github/workflows/ci_workflow.yml
@@ -19,15 +19,12 @@ jobs:
       uses: actions/setup-python@v4
       with:
         python-version: ${{ matrix.python-version }}
-    - name: Install Poetry
+    - name: Install pipx and Poetry
       run: |
-        pip install poetry
-    - name: Install dependencies
-      run: |
-        poetry install
+        pip install pipx poetry
     - name: Run lint command from tox.ini
       run: |
-        poetry run tox -e lint
+        pipx run tox -e lint

   pytest:

From a780a72de1aa099dcc27f725458f2a9e7d58184f Mon Sep 17 00:00:00 2001
From: NeilGorman104
Date: Mon, 26 Jun 2023 13:18:26 -0400
Subject: [PATCH 047/105] Remove schema directory line

---
 tap_hubspot/client.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tap_hubspot/client.py b/tap_hubspot/client.py
index
1275e0f..017b53e 100644 --- a/tap_hubspot/client.py +++ b/tap_hubspot/client.py @@ -21,7 +21,6 @@ ) _Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] -# SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") class HubspotStream(RESTStream): From 28f0da180ac4c04779ca31148d7a1e690a60a503 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 26 Jun 2023 13:18:51 -0400 Subject: [PATCH 048/105] Remove unused dependencies --- pyproject.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 56128de..a80bc73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,8 +16,6 @@ singer-sdk = { version="^0.26.0" } fs-s3fs = { version = "^1.1.1", optional = true } requests = "^2.28.2" cached-property = "^1" # Remove after Python 3.7 support is dropped -pendulum = "^2.1.2" -black = "^23.3.0" [tool.poetry.group.dev.dependencies] pytest = "^7.2.1" From 9e1d18b54a15459eb8630066e1c45b59352cc9c7 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 26 Jun 2023 13:19:29 -0400 Subject: [PATCH 049/105] remove unused replication keys in user stream --- tap_hubspot/streams.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index f227671..b02fd0c 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -234,8 +234,6 @@ class UsersStream(HubspotStream): name = "users" path = "/users?fields={}".format(columns) primary_keys = ["id"] - # replication_key = "LastModifiedDate" - # replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), From f61253a918e7369112c1324e18e99bea09d77e86 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 26 Jun 2023 13:21:04 -0400 Subject: [PATCH 050/105] edit Hubspot readme and init descriptions --- README.md | 2 +- tap_hubspot/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0fb665c..a7bcf68 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # tap-hubspot -`tap-hubspot` is a Singer tap for tap-hubspot. +`tap-hubspot` is a Singer tap for Hubspot. Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps. diff --git a/tap_hubspot/__init__.py b/tap_hubspot/__init__.py index c296f12..61a3893 100644 --- a/tap_hubspot/__init__.py +++ b/tap_hubspot/__init__.py @@ -1 +1 @@ -"""Tap for tap-hubspot.""" +"""Tap for Hubspot.""" From 43b29ac86ca1ea2e36419bc4f661cd202c64d4f8 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 26 Jun 2023 13:22:07 -0400 Subject: [PATCH 051/105] Remove redundant 'Meltano Variables' Section in readme --- README.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/README.md b/README.md index a7bcf68..d11d34c 100644 --- a/README.md +++ b/README.md @@ -27,14 +27,6 @@ Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps. | flattening_enabled | False | None | 'True' to enable schema flattening and automatically expand nested properties. | | flattening_max_depth| False | None | The max depth to flatten schemas. | -### Meltano Variables - -The following config values need to be set in order to use with Meltano. These can be set in `meltano.yml`, via -```meltano config tap-hubspot set --interactive```, or via the env var mappings shown above. 
- -- `access_token:` access token from TAP_HUBSPOT_ACCESS_TOKEN variable -- `start_date:` start date -- `end_date:` end_date A full list of supported settings and capabilities for this tap is available by running: From d8bfc182b772178c95962a0096f50ff3a6e39fad Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 27 Jun 2023 11:03:59 +0530 Subject: [PATCH 052/105] updated list stream to contact stream --- tap_hubspot/streams.py | 2 +- tap_hubspot/tap.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index b02fd0c..5317dba 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -18,7 +18,7 @@ IntegerType = th.IntegerType -class ListsStream(HubspotStream): +class ContactStream(HubspotStream): """ https://legacydocs.hubspot.com/docs/methods/lists/get_lists diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index eba5083..122eec5 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -34,7 +34,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: A list of discovered streams. """ return [ - streams.ListsStream(self), + streams.ContactStream(self), streams.UsersStream(self), streams.OwnersStream(self), streams.TicketPipelineStream(self), From 47487901a229fc69dfc72f69b878cfda7a95f7ed Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 27 Jun 2023 17:27:39 +0530 Subject: [PATCH 053/105] removed auth from meltano --- meltano.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/meltano.yml b/meltano.yml index aaa65a9..995f6f4 100644 --- a/meltano.yml +++ b/meltano.yml @@ -20,9 +20,7 @@ plugins: - name: start_date value: '2023-01-01T00:00:00Z' - name: end_date - value: '2023-05-22T00:00:00Z' - - name: auth_type - value: 'oauth' + value: '2023-05-22T00:00:00Z' loaders: - name: target-jsonl variant: andyh1203 From c8e2cd31990d44a264f55a61ff0c8401f2f864bd Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Tue, 27 Jun 2023 18:13:24 -0400 Subject: [PATCH 054/105] add annotations to conftest --- tests/conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/conftest.py b/tests/conftest.py index 6bb3ec2..4133230 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,4 @@ """Test Configuration.""" +from __future__ import annotations pytest_plugins = ("singer_sdk.testing.pytest_plugin",) From 0b5acecac5ee6e45847d4392071b59058d1906bc Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:08:46 -0400 Subject: [PATCH 055/105] Add Company Stream --- tap_hubspot/streams.py | 98 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 15 ++++--- 2 files changed, 106 insertions(+), 7 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 5317dba..0f77102 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1868,3 +1868,101 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: ) return property_records + +class CompanyStream(HubspotStream): + + """ + https://developers.hubspot.com/docs/api/crm/companies + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, email, roleIds, primaryteamid + """ + + name = "companies" + path = "/objects/companies" + primary_keys = ["id"] + + schema = PropertiesList( + Property("id", StringType), + Property( + "properties", + 
ObjectType( + Property("city", StringType), + Property("createdDate", StringType), + Property("domain", StringType), + Property("hs_lastmodifieddate", StringType), + Property("industry", StringType), + Property("name", StringType), + Property("phone", StringType), + Property("state", StringType), + ), + ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), + + ).to_dict() + + + + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 122eec5..5cce7eb 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -34,13 +34,14 @@ def discover_streams(self) -> list[streams.HubspotStream]: A list of discovered streams. 
""" return [ - streams.ContactStream(self), - streams.UsersStream(self), - streams.OwnersStream(self), - streams.TicketPipelineStream(self), - streams.DealPipelineStream(self), - streams.EmailSubscriptionStream(self), - streams.PropertyNotesStream(self), + #streams.ContactStream(self), + #streams.UsersStream(self), + #streams.OwnersStream(self), + #streams.TicketPipelineStream(self), + #streams.DealPipelineStream(self), + #streams.EmailSubscriptionStream(self), + #streams.PropertyNotesStream(self), + streams.CompanyStream(self), ] From 6fe89b9a1f37ed7f87d8b88de863514f2c231a0d Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:12:21 -0400 Subject: [PATCH 056/105] add Deal stream --- tap_hubspot/streams.py | 98 +++++++++++++++++++++++++++++++++++++++++- tap_hubspot/tap.py | 1 + 2 files changed, 98 insertions(+), 1 deletion(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 0f77102..c83e765 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1913,7 +1913,7 @@ class CompanyStream(HubspotStream): ).to_dict() - + @property def url_base(self) -> str: @@ -1966,3 +1966,99 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results + + +class DealStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/companies + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, email, roleIds, primaryteamid + """ + + name = "companies" + path = "/objects/deals" + primary_keys = ["id"] + + schema = PropertiesList( + Property("id", StringType), + Property( + "properties", + ObjectType( + Property("amount", StringType), + Property("createdDate", StringType), + Property("closedDate", StringType), + Property("dealname", StringType), + Property("dealstage", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hubspot_owner_id", StringType), + Property("pipeline", StringType), + ), + ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), + + ).to_dict() + + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results \ No newline at end of file diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 5cce7eb..ddb6819 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -42,6 +42,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: #streams.EmailSubscriptionStream(self), #streams.PropertyNotesStream(self), streams.CompanyStream(self), + streams.DealStream(self), ] From fcd9f7e804328e84679e475e1f14c4222d9c7b80 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:16:59 -0400 Subject: [PATCH 057/105] update deal stream table --- tap_hubspot/streams.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index c83e765..7231360 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1970,7 +1970,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: class DealStream(HubspotStream): """ - https://developers.hubspot.com/docs/api/crm/companies + https://developers.hubspot.com/docs/api/crm/deals """ """ @@ -1986,7 +1986,7 @@ class DealStream(HubspotStream): id, email, roleIds, primaryteamid """ - name = "companies" + name = "deals" path = "/objects/deals" primary_keys = ["id"] From ac18a1693aa2cfee9f986b76c9300059c87da17d Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:34:28 -0400 Subject: [PATCH 058/105] Add FeedbackSubmissionsStream --- tap_hubspot/streams.py | 96 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 97 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 7231360..94b6b9b 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2011,6 +2011,102 @@ class DealStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class FeedbackSubmissionsStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/feedback-submissions + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, email, roleIds, primaryteamid + """ + + name = "feedbacksubmissions" + path = "/objects/feedback_submissions" + primary_keys = ["id"] + + schema = PropertiesList( + Property("id", StringType), + Property( + "properties", + ObjectType( + Property("hs_content", StringType), + Property("hs_ingestion_id", StringType), + Property("hs_response_group", StringType), + Property("hs_submission_name", StringType), + Property("hs_survey_channel", StringType), + Property("hs_survey_id", StringType), + Property("hs_survey_name", StringType), + Property("hs_survey_type", StringType), + Property("hs_value", StringType), + ), + ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index ddb6819..a4986cf 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -43,6 +43,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: #streams.PropertyNotesStream(self), streams.CompanyStream(self), streams.DealStream(self), + #streams.FeedbackSubmissionsStream(self), ] From 99704f1cc5a74e94e866e6f39a414c0b1ac9a242 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:39:44 -0400 Subject: [PATCH 059/105] add LineItemStream --- tap_hubspot/streams.py | 95 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 96 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 94b6b9b..bb6da3d 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2107,6 +2107,101 @@ class FeedbackSubmissionsStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class LineItemStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/line-items + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, createdate, hs_lastmodifieddate, hs_product_id, hs_recurring_billing_period, name, price, quantity, recurringbillingfrequency, createdAt, updatedAt, archived + """ + + name = "lineitems" + path = "/objects/line_items" + primary_keys = ["id"] + + schema = PropertiesList( + Property("id", StringType), + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_product_id", StringType), + Property("hs_recurring_billing_period", StringType), + Property("name", StringType), + Property("price", StringType), + Property("quantity", StringType), + Property("recurringbillingfrequency", StringType), + ), + ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index a4986cf..70af526 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -44,6 +44,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.CompanyStream(self), streams.DealStream(self), #streams.FeedbackSubmissionsStream(self), + streams.LineItemStream(self), ] From 209091330f2c46f7b9eadc05bc993be02b74d6fb Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:41:51 -0400 Subject: [PATCH 060/105] update columns for object streams --- tap_hubspot/streams.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index bb6da3d..29b0f4e 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1885,7 +1885,7 @@ class CompanyStream(HubspotStream): """ columns = """ - id, email, roleIds, primaryteamid + id, properties, createdAt, updatedAt, archived """ name = "companies" @@ -1983,7 +1983,7 @@ class DealStream(HubspotStream): """ columns = """ - id, email, roleIds, primaryteamid + id, properties, createdAt, updatedAt, archived """ name = "deals" @@ -2078,7 +2078,7 @@ class FeedbackSubmissionsStream(HubspotStream): """ columns = """ - id, email, roleIds, primaryteamid + id, properties, createdAt, updatedAt, archived """ name = "feedbacksubmissions" @@ -2174,7 +2174,7 @@ class LineItemStream(HubspotStream): """ columns = """ - id, createdate, hs_lastmodifieddate, hs_product_id, hs_recurring_billing_period, name, price, quantity, recurringbillingfrequency, createdAt, updatedAt, archived + id, properties, createdAt, updatedAt, archived """ name = "lineitems" From 08a393ec21408814a5a18949df505e8b016b7d62 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:46:38 -0400 Subject: [PATCH 061/105] add ProductStream --- tap_hubspot/streams.py | 95 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 96 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 29b0f4e..e8f504e 100644 --- 
a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2202,6 +2202,101 @@ class LineItemStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class ProductStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/products + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + id, properties, createdAt, updatedAt, archived + """ + + name = "product" + path = "/objects/product" + primary_keys = ["id"] + + schema = PropertiesList( + Property("id", StringType), + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("description", StringType), + Property("hs_cost_of_goods_sold", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_recurring_billing_period", StringType), + Property("hs_sku", StringType), + Property("name", StringType), + Property("price", StringType), + ), + ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 70af526..763ffda 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -45,6 +45,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.DealStream(self), #streams.FeedbackSubmissionsStream(self), streams.LineItemStream(self), + streams.ProductStream(self), ] From 1fca84fa6d0bdf373486269c81525b2653214462 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 14:56:17 -0400 Subject: [PATCH 062/105] add TicketStream --- tap_hubspot/streams.py | 92 +++++++++++++++++++++++++++++++++++++++++- tap_hubspot/tap.py | 1 + 2 files changed, 92 insertions(+), 1 deletion(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index e8f504e..6cfd5ab 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2273,7 +2273,7 @@ class ProductStream(HubspotStream): """ name = "product" - path = "/objects/product" + path = "/objects/products" primary_keys = ["id"] schema = PropertiesList( @@ -2297,6 +2297,96 @@ class ProductStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated 
which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class TicketStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/tickets + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "ticket" + path = "/objects/tickets" + primary_keys = ["id"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_pipeline", StringType), + Property("hs_pipeline_stage", StringType), + Property("hs_ticket_priority", StringType), + Property("hubspot_owner_id", StringType), + Property("subject", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 763ffda..cccb246 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -46,6 +46,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: #streams.FeedbackSubmissionsStream(self), streams.LineItemStream(self), streams.ProductStream(self), + streams.TicketStream(self), ] From d4fcaf93b94483e91f2790f5d5054471e97d9742 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 15:16:30 -0400 Subject: [PATCH 063/105] add QuoteStream --- tap_hubspot/streams.py | 93 +++++++++++++++++++++++++++++++++++++++++- tap_hubspot/tap.py | 1 + 2 files changed, 93 insertions(+), 1 deletion(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 6cfd5ab..ed17796 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2369,7 +2369,7 @@ class TicketStream(HubspotStream): name = "ticket" path = "/objects/tickets" - primary_keys = ["id"] + primary_keys = ["properties"] schema = PropertiesList( Property( @@ -2387,6 +2387,97 @@ class TicketStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. 
+ + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class QuoteStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/quotes + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "quote" + path = "/objects/quotes" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("hs_createdate", StringType), + Property("hs_expiration_date", StringType), + Property("hs_quote_amount", StringType), + Property("hs_quote_number", StringType), + Property("hs_status", StringType), + Property("hs_terms", StringType), + Property("hs_title", StringType), + Property("hubspot_owner_id", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index cccb246..2c76567 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -47,6 +47,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.LineItemStream(self), streams.ProductStream(self), streams.TicketStream(self), + streams.QuoteStream(self), ] From 67ab65ac7479ebc59f02c2986366f00378cdf9ec Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 15:20:18 -0400 Subject: [PATCH 064/105] add GoalStream --- tap_hubspot/streams.py | 90 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 91 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index ed17796..c5a6bbf 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2478,6 +2478,96 @@ class QuoteStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class GoalStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/goals + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "goal" + path = "/objects/goal_targets" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_created_by_user_id", StringType), + Property("hs_end_datetime", StringType), + Property("hs_goal_name", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_start_datetime", StringType), + Property("hs_target_amount", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 2c76567..529a37a 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -48,6 +48,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.ProductStream(self), streams.TicketStream(self), streams.QuoteStream(self), + streams.GoalStream(self), ] From dc4e26786759047f5a90d0dba1b0f434062421d6 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 15:55:58 -0400 Subject: [PATCH 065/105] add CallStream --- tap_hubspot/streams.py | 94 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 5 ++- 2 files changed, 97 insertions(+), 2 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index c5a6bbf..fc25a98 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2568,6 +2568,100 @@ class GoalStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class CallStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/calls + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "call" + path = "/objects/calls" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_call_body", StringType), + Property("hs_call_duration", StringType), + Property("hs_call_from_number", StringType), + Property("hs_call_recording_url", StringType), + Property("hs_call_status", StringType), + Property("hs_call_title", StringType), + Property("hs_call_to_number", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_timestamp", StringType), + Property("hubspot_owner_id", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 529a37a..47bf7b4 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -47,8 +47,9 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.LineItemStream(self), streams.ProductStream(self), streams.TicketStream(self), - streams.QuoteStream(self), - streams.GoalStream(self), + #streams.QuoteStream(self), + #streams.GoalStream(self), + streams.CallStream(self), ] From 21f3a4dedb799eacfbfdcd32cdb2c211f86052bd Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 16:01:37 -0400 Subject: [PATCH 066/105] add CommunicationStream --- tap_hubspot/streams.py | 88 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 89 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index fc25a98..0ee36f2 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2662,6 +2662,94 @@ class CallStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class CommunicationStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/communications + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "communication" + path = "/objects/Communications" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_communication_body", StringType), + Property("hs_communication_channel_type", StringType), + Property("hs_communication_logged_from", StringType), + Property("hs_lastmodifieddate", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 47bf7b4..9111847 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -50,6 +50,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: #streams.QuoteStream(self), #streams.GoalStream(self), streams.CallStream(self), + streams.CommunicationStream(self), ] From c9f5abcf0e0eeff7f7e4d3c61a846cda8483eb11 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 16:04:48 -0400 Subject: [PATCH 067/105] add EmailStream --- tap_hubspot/streams.py | 97 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 98 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 0ee36f2..40d2495 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2750,6 +2750,103 @@ class CommunicationStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class EmailStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/email + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "email" + path = "/objects/emails" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_email_direction", StringType), + Property("hs_email_sender_email", StringType), + Property("hs_email_sender_firstname", StringType), + Property("hs_email_sender_lastname", StringType), + Property("hs_email_status", StringType), + Property("hs_email_subject", StringType), + Property("hs_email_text", StringType), + Property("hs_email_to_email", StringType), + Property("hs_email_to_firstname", StringType), + Property("hs_email_to_lastname", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_timestamp", StringType), + Property("hubspot_owner_id", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 9111847..d5b9356 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -51,6 +51,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: #streams.GoalStream(self), streams.CallStream(self), streams.CommunicationStream(self), + streams.EmailStream(self), ] From da3f8c11dada7621651c86269033d07d74ccc8f4 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 16:10:33 -0400 Subject: [PATCH 068/105] add MeetingStream --- tap_hubspot/streams.py | 95 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 96 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 40d2495..c6a6cbc 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2847,6 +2847,101 @@ class EmailStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class MeetingStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/meetings + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "meeting" + path = "/objects/meetings" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_internal_meeting_notes", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_meeting_body", StringType), + Property("hs_meeting_end_time", StringType), + Property("hs_meeting_external_url", StringType), + Property("hs_meeting_location", StringType), + Property("hs_meeting_outcome", StringType), + Property("hs_meeting_start_time", StringType), + Property("hs_meeting_title", StringType), + Property("hs_timestamp", StringType), + Property("hubspot_owner_id", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index d5b9356..ebd6ad2 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -52,6 +52,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.CallStream(self), streams.CommunicationStream(self), streams.EmailStream(self), + streams.MeetingStream(self), ] From 0c756977dd678acaa9723dcae482f09b25ac1525 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 16:17:09 -0400 Subject: [PATCH 069/105] add NoteStream --- tap_hubspot/streams.py | 88 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 89 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index c6a6cbc..204ae61 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2942,6 +2942,94 @@ class MeetingStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class NoteStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/notes + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "note" + path = "/objects/notes" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_note_body", StringType), + Property("hs_timestamp", StringType), + Property("hubspot_owner_id", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index ebd6ad2..32558bc 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -53,6 +53,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.CommunicationStream(self), streams.EmailStream(self), streams.MeetingStream(self), + streams.NoteStream(self), ] From 067a7137c3a053b45180193432b39e415b576580 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 16:25:04 -0400 Subject: [PATCH 070/105] add PostalMailStream --- tap_hubspot/streams.py | 87 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 88 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 204ae61..867cbab 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -3030,6 +3030,93 @@ class NoteStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + + +class PostalMailStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/postal-mail + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "postalmail" + path = "/objects/postal_mail" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_postal_mail_body", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 32558bc..441dd71 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -54,6 +54,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.EmailStream(self), streams.MeetingStream(self), streams.NoteStream(self), + streams.PostalMailStream(self), ] From 29c6dcb2cdb4c546ed188a277f37feb96a6a9ed1 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 16:27:18 -0400 Subject: [PATCH 071/105] add TaskStream --- tap_hubspot/streams.py | 91 ++++++++++++++++++++++++++++++++++++++++++ tap_hubspot/tap.py | 1 + 2 files changed, 92 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 867cbab..35b902b 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -3117,6 +3117,97 @@ class PostalMailStream(HubspotStream): ).to_dict() + @property + def url_base(self) -> str: + """ + Returns an updated which has the api version + """ + base_url = "https://api.hubapi.com/crm/v3" + return base_url + + def get_url_params( + self, + context: dict | None, + next_page_token: Any | None, + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ + params: dict = {} + if next_page_token: + params["page"] = next_page_token + if self.replication_key: + params["sort"] = "asc" + params["order_by"] = self.replication_key + + return params + + def parse_response(self, response: requests.Response) -> Iterable[dict]: + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. 
+ """ + + resp_json = response.json() + + if isinstance(resp_json, list): + results = resp_json + elif resp_json.get("results") is not None: + results = resp_json["results"] + else: + results = resp_json + + yield from results + +class TaskStream(HubspotStream): + """ + https://developers.hubspot.com/docs/api/crm/tasks + """ + + """ + columns: columns which will be added to fields parameter in api + name: stream name + path: path which will be added to api url in client.py + schema: instream schema + primary_keys = primary keys for the table + replication_key = datetime keys for replication + """ + + columns = """ + properties + """ + + name = "task" + path = "/objects/tasks" + primary_keys = ["properties"] + + schema = PropertiesList( + Property( + "properties", + ObjectType( + Property("createdate", StringType), + Property("hs_lastmodifieddate", StringType), + Property("hs_task_body", StringType), + Property("hs_task_priority", StringType), + Property("hs_task_status", StringType), + Property("hs_task_subject", StringType), + Property("hs_timestamp", StringType), + Property("hubspot_owner_id", StringType), + ), + ), + + ).to_dict() + @property def url_base(self) -> str: """ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 441dd71..806b405 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -55,6 +55,7 @@ def discover_streams(self) -> list[streams.HubspotStream]: streams.MeetingStream(self), streams.NoteStream(self), streams.PostalMailStream(self), + streams.TaskStream(self), ] From 6d950df541339e295b2de24a14f58db25eaf2435 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 28 Jun 2023 18:45:02 -0400 Subject: [PATCH 072/105] update schemas with ids, timestamp, and archived data --- tap_hubspot/streams.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 35b902b..d8c5114 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2372,6 +2372,7 @@ class TicketStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -2384,6 +2385,9 @@ class TicketStream(HubspotStream): Property("subject", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -2462,6 +2466,7 @@ class QuoteStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -2475,6 +2480,9 @@ class QuoteStream(HubspotStream): Property("hubspot_owner_id", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -2553,6 +2561,7 @@ class GoalStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -2565,6 +2574,9 @@ class GoalStream(HubspotStream): Property("hs_target_amount", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -2643,6 +2655,7 @@ class CallStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -2659,6 +2672,9 @@ class CallStream(HubspotStream): Property("hubspot_owner_id", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + 
Property("archived", BooleanType), ).to_dict() @@ -2737,6 +2753,7 @@ class CommunicationStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -2747,6 +2764,9 @@ class CommunicationStream(HubspotStream): Property("hs_lastmodifieddate", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -2825,6 +2845,7 @@ class EmailStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -2844,6 +2865,9 @@ class EmailStream(HubspotStream): Property("hubspot_owner_id", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -2922,6 +2946,7 @@ class MeetingStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -2939,6 +2964,9 @@ class MeetingStream(HubspotStream): Property("hubspot_owner_id", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -3017,6 +3045,7 @@ class NoteStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -3027,6 +3056,9 @@ class NoteStream(HubspotStream): Property("hubspot_owner_id", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -3106,6 +3138,7 @@ class PostalMailStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -3114,6 +3147,9 @@ class PostalMailStream(HubspotStream): Property("hs_postal_mail_body", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() @@ -3192,6 +3228,7 @@ class TaskStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( + Property("id", StringType), Property( "properties", ObjectType( @@ -3205,6 +3242,9 @@ class TaskStream(HubspotStream): Property("hubspot_owner_id", StringType), ), ), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), ).to_dict() From 20fc7a2a9240cacf720f7390fb00ca46bf916cc7 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Fri, 30 Jun 2023 14:52:38 -0400 Subject: [PATCH 073/105] update pipeline stream stage values --- tap_hubspot/streams.py | 42 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index d8c5114..92f8b59 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -413,7 +413,26 @@ class TicketPipelineStream(HubspotStream): Property("label", StringType), Property("displayOrder", StringType), Property("active", BooleanType), - Property("stages", StringType), + Property( + "stages", + ArrayType( + ObjectType( + Property("label", StringType), + Property("displayOrder", StringType), + Property( + "metadata", + ObjectType( + Property("ticketState", StringType), + Property("isClosed", StringType), + ), + ), + Property("stageId", IntegerType), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("active", 
StringType), + ), + ), + ), Property("objectType", StringType), Property("objectTypeId", StringType), Property("pipelineId", StringType), @@ -504,7 +523,26 @@ class DealPipelineStream(HubspotStream): Property("label", StringType), Property("displayOrder", StringType), Property("active", BooleanType), - Property("stages", StringType), + Property( + "stages", + ArrayType( + ObjectType( + Property("label", StringType), + Property("displayOrder", StringType), + Property( + "metadata", + ObjectType( + Property("isClosed", BooleanType), + Property("probability", StringType), + ), + ), + Property("stageId", IntegerType), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("active", StringType), + ), + ), + ), Property("objectType", StringType), Property("objectTypeId", StringType), Property("pipelineId", StringType), From 41f49cd04ebe0cc7f4ef1f4efbdc98782c9161cf Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Fri, 30 Jun 2023 14:58:36 -0400 Subject: [PATCH 074/105] change object stream ids to int --- tap_hubspot/streams.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 92f8b59..26eb047 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1931,7 +1931,7 @@ class CompanyStream(HubspotStream): primary_keys = ["id"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2029,7 +2029,7 @@ class DealStream(HubspotStream): primary_keys = ["id"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2124,7 +2124,7 @@ class FeedbackSubmissionsStream(HubspotStream): primary_keys = ["id"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2220,7 +2220,7 @@ class LineItemStream(HubspotStream): primary_keys = ["id"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2315,7 +2315,7 @@ class ProductStream(HubspotStream): primary_keys = ["id"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2410,7 +2410,7 @@ class TicketStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2504,7 +2504,7 @@ class QuoteStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2599,7 +2599,7 @@ class GoalStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2693,7 +2693,7 @@ class CallStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2791,7 +2791,7 @@ class CommunicationStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -2883,7 +2883,7 @@ class EmailStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( 
"properties", ObjectType( @@ -2984,7 +2984,7 @@ class MeetingStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -3083,7 +3083,7 @@ class NoteStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -3176,7 +3176,7 @@ class PostalMailStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( @@ -3266,7 +3266,7 @@ class TaskStream(HubspotStream): primary_keys = ["properties"] schema = PropertiesList( - Property("id", StringType), + Property("id", IntegerType), Property( "properties", ObjectType( From 420b7567ff9eb4739a7751d7f9f17e2e6ef0863a Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Fri, 30 Jun 2023 16:28:18 -0400 Subject: [PATCH 075/105] Remove loaders from meltano.yml --- meltano.yml | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/meltano.yml b/meltano.yml index 995f6f4..26f4a49 100644 --- a/meltano.yml +++ b/meltano.yml @@ -21,29 +21,6 @@ plugins: value: '2023-01-01T00:00:00Z' - name: end_date value: '2023-05-22T00:00:00Z' - loaders: - - name: target-jsonl - variant: andyh1203 - pip_url: target-jsonl - - name: target-csv - variant: hotgluexyz - pip_url: git+https://github.com/hotgluexyz/target-csv.git@0.3.3 - config: - destination_path: /Users/neilgorman/Documents/GitHub/tap-hubspot/output - - name: target-snowflake - variant: transferwise - pip_url: pipelinewise-target-snowflake - config: - batch_size: 5 environments: -- name: dev - config: - plugins: - loaders: - - name: target-csv - - name: target-snowflake - config: - primary_key_required: false - add_metadata_columns: true - name: staging - name: prod From 5f8447f2b7a1617785901735426eaf0176502d9f Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Fri, 30 Jun 2023 16:28:27 -0400 Subject: [PATCH 076/105] uncomment all streams --- tap_hubspot/tap.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 806b405..86cfb2d 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -34,21 +34,21 @@ def discover_streams(self) -> list[streams.HubspotStream]: A list of discovered streams. 
""" return [ - #streams.ContactStream(self), - #streams.UsersStream(self), - #streams.OwnersStream(self), - #streams.TicketPipelineStream(self), - #streams.DealPipelineStream(self), - #streams.EmailSubscriptionStream(self), - #streams.PropertyNotesStream(self), + streams.ContactStream(self), + streams.UsersStream(self), + streams.OwnersStream(self), + streams.TicketPipelineStream(self), + streams.DealPipelineStream(self), + streams.EmailSubscriptionStream(self), + streams.PropertyNotesStream(self), streams.CompanyStream(self), streams.DealStream(self), - #streams.FeedbackSubmissionsStream(self), + streams.FeedbackSubmissionsStream(self), streams.LineItemStream(self), streams.ProductStream(self), streams.TicketStream(self), - #streams.QuoteStream(self), - #streams.GoalStream(self), + streams.QuoteStream(self), + streams.GoalStream(self), streams.CallStream(self), streams.CommunicationStream(self), streams.EmailStream(self), From c078cc2b3676e66b6c1e4a32506fa055d8b002d1 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Fri, 30 Jun 2023 16:33:46 -0400 Subject: [PATCH 077/105] reformatting --- tap_hubspot/streams.py | 118 ++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 61 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 26eb047..7ea3937 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1907,6 +1907,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: return property_records + class CompanyStream(HubspotStream): """ @@ -1948,11 +1949,8 @@ class CompanyStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() - - @property def url_base(self) -> str: """ @@ -2046,7 +2044,6 @@ class DealStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2058,9 +2055,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2101,6 +2098,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class FeedbackSubmissionsStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/feedback-submissions @@ -2142,7 +2140,6 @@ class FeedbackSubmissionsStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2154,9 +2151,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. 
@@ -2197,6 +2194,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class LineItemStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/line-items @@ -2237,7 +2235,6 @@ class LineItemStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2249,9 +2246,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2292,6 +2289,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class ProductStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/products @@ -2332,7 +2330,6 @@ class ProductStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2344,9 +2341,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2387,6 +2384,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class TicketStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/tickets @@ -2426,7 +2424,6 @@ class TicketStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2438,9 +2435,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2481,6 +2478,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class QuoteStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/quotes @@ -2521,7 +2519,6 @@ class QuoteStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2533,9 +2530,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2576,6 +2573,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class GoalStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/goals @@ -2615,7 +2613,6 @@ class GoalStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2627,9 +2624,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. 
@@ -2670,6 +2667,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class CallStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/calls @@ -2713,7 +2711,6 @@ class CallStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2725,9 +2722,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2768,6 +2765,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class CommunicationStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/communications @@ -2805,7 +2803,6 @@ class CommunicationStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2817,9 +2814,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2860,6 +2857,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class EmailStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/email @@ -2906,7 +2904,6 @@ class EmailStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -2918,9 +2915,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -2961,6 +2958,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class MeetingStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/meetings @@ -3005,7 +3003,6 @@ class MeetingStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -3017,9 +3014,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -3059,7 +3056,8 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results - + + class NoteStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/notes @@ -3097,7 +3095,6 @@ class NoteStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -3109,9 +3106,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. 
@@ -3188,7 +3185,6 @@ class PostalMailStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -3200,9 +3196,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -3243,6 +3239,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: yield from results + class TaskStream(HubspotStream): """ https://developers.hubspot.com/docs/api/crm/tasks @@ -3283,7 +3280,6 @@ class TaskStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -3295,9 +3291,9 @@ def url_base(self) -> str: return base_url def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, + self, + context: dict | None, + next_page_token: Any | None, ) -> dict[str, Any]: """Return a dictionary of values to be used in URL parameterization. @@ -3336,4 +3332,4 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: else: results = resp_json - yield from results \ No newline at end of file + yield from results From 6b346ea3d9023bde35f879fb55523acd854a065f Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 12:19:56 -0400 Subject: [PATCH 078/105] Update doc links & comments in url_base function, get records description --- tap_hubspot/streams.py | 102 +++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 49 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 7ea3937..48e24de 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -111,7 +111,7 @@ class ContactStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated path which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/contacts/v1" return base_url @@ -215,7 +215,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: class UsersStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods/ + https://developers.hubspot.com/docs/api/settings/user-provisioning """ """ @@ -245,7 +245,7 @@ class UsersStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/settings/v3" return base_url @@ -298,7 +298,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: class OwnersStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods/owners/get_owners + https://developers.hubspot.com/docs/api/crm/owners#endpoint?spec=GET-/crm/v3/owners/ """ """ @@ -334,7 +334,7 @@ class OwnersStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -444,7 +444,7 @@ class TicketPipelineStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm-pipelines/v1" return base_url @@ -554,7 +554,7 @@ class 
DealPipelineStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm-pipelines/v1" return base_url @@ -645,7 +645,7 @@ class EmailSubscriptionStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/email/public/v1" return base_url @@ -698,7 +698,7 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: class PropertyTicketStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -745,7 +745,7 @@ class PropertyTicketStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -807,7 +807,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyDealStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -855,7 +855,7 @@ class PropertyDealStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -897,7 +897,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyContactStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -944,7 +944,7 @@ class PropertyContactStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -986,7 +986,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyCompanyStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1033,7 +1033,7 @@ class PropertyCompanyStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1075,7 +1075,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyProductStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1122,7 +1122,7 @@ class PropertyProductStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1164,7 +1164,7 @@ def 
post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyLineItemStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1211,7 +1211,7 @@ class PropertyLineItemStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1253,7 +1253,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyEmailStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1300,7 +1300,7 @@ class PropertyEmailStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1342,7 +1342,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyPostalMailStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1389,7 +1389,7 @@ class PropertyPostalMailStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1431,7 +1431,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyCallStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1478,7 +1478,7 @@ class PropertyCallStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1520,7 +1520,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyMeetingStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1567,7 +1567,7 @@ class PropertyMeetingStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1609,7 +1609,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyTaskStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1656,7 +1656,7 @@ class PropertyTaskStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = 
"https://api.hubapi.com/crm/v3" return base_url @@ -1698,7 +1698,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyCommunicationStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1745,7 +1745,7 @@ class PropertyCommunicationStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1787,7 +1787,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: class PropertyNotesStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods + https://developers.hubspot.com/docs/api/crm/properties#endpoint?spec=PATCH-/crm/v3/properties/{objectType}/{propertyName} """ """ @@ -1834,7 +1834,7 @@ class PropertyNotesStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -1873,6 +1873,10 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: + """ + Merges all the property stream data into a single property table + """ + property_ticket = PropertyTicketStream(self._tap, schema={"properties": {}}) property_deal = PropertyDealStream(self._tap, schema={"properties": {}}) property_contact = PropertyContactStream(self._tap, schema={"properties": {}}) @@ -1954,7 +1958,7 @@ class CompanyStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2049,7 +2053,7 @@ class DealStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2145,7 +2149,7 @@ class FeedbackSubmissionsStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2240,7 +2244,7 @@ class LineItemStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2335,7 +2339,7 @@ class ProductStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2429,7 +2433,7 @@ class TicketStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2524,7 +2528,7 @@ class QuoteStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated 
path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2618,7 +2622,7 @@ class GoalStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2716,7 +2720,7 @@ class CallStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2808,7 +2812,7 @@ class CommunicationStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -2909,7 +2913,7 @@ class EmailStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -3008,7 +3012,7 @@ class MeetingStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -3100,7 +3104,7 @@ class NoteStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -3190,7 +3194,7 @@ class PostalMailStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url @@ -3285,7 +3289,7 @@ class TaskStream(HubspotStream): @property def url_base(self) -> str: """ - Returns an updated which has the api version + Returns an updated path which includes the api version """ base_url = "https://api.hubapi.com/crm/v3" return base_url From 1688168d67067a8fc56b4fcdbfee10816930f752 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 12:20:56 -0400 Subject: [PATCH 079/105] add dev environment --- meltano.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/meltano.yml b/meltano.yml index 26f4a49..00329fe 100644 --- a/meltano.yml +++ b/meltano.yml @@ -22,5 +22,6 @@ plugins: - name: end_date value: '2023-05-22T00:00:00Z' environments: +- name: dev - name: staging - name: prod From 7aaa74245c6981eabf3cde4f3c4723757552e12f Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 13:04:10 -0400 Subject: [PATCH 080/105] update primary & replication keys --- tap_hubspot/streams.py | 52 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 48e24de..1362f84 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -234,6 +234,8 @@ class UsersStream(HubspotStream): name = "users" path = "/users?fields={}".format(columns) primary_keys = ["id"] + replication_key = "id" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -1934,6 +1936,8 @@ class CompanyStream(HubspotStream): name = "companies" path = "/objects/companies" primary_keys = ["id"] + replication_key = "updatedAt" + 
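# Incremental replication assumes every record carries a top-level updatedAt. +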
replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2029,6 +2033,8 @@ class DealStream(HubspotStream): name = "deals" path = "/objects/deals" primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2124,6 +2130,8 @@ class FeedbackSubmissionsStream(HubspotStream): name = "feedbacksubmissions" path = "/objects/feedback_submissions" primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2220,6 +2228,8 @@ class LineItemStream(HubspotStream): name = "lineitems" path = "/objects/line_items" primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2315,6 +2325,8 @@ class ProductStream(HubspotStream): name = "product" path = "/objects/products" primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2409,7 +2421,9 @@ class TicketStream(HubspotStream): name = "ticket" path = "/objects/tickets" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2503,7 +2517,9 @@ class QuoteStream(HubspotStream): name = "quote" path = "/objects/quotes" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2598,7 +2614,9 @@ class GoalStream(HubspotStream): name = "goal" path = "/objects/goal_targets" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2692,7 +2710,9 @@ class CallStream(HubspotStream): name = "call" path = "/objects/calls" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2790,7 +2810,9 @@ class CommunicationStream(HubspotStream): name = "communication" path = "/objects/Communications" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2882,7 +2904,9 @@ class EmailStream(HubspotStream): name = "email" path = "/objects/emails" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -2983,7 +3007,9 @@ class MeetingStream(HubspotStream): name = "meeting" path = "/objects/meetings" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -3082,7 +3108,9 @@ class NoteStream(HubspotStream): name = "note" path = "/objects/notes" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -3174,7 +3202,9 @@ class PostalMailStream(HubspotStream): name = "postalmail" path = "/objects/postal_mail" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + 
replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), @@ -3264,7 +3294,9 @@ class TaskStream(HubspotStream): name = "task" path = "/objects/tasks" - primary_keys = ["properties"] + primary_keys = ["id"] + replication_key = "updatedAt" + replication_method = "incremental" schema = PropertiesList( Property("id", IntegerType), From bc4e67e36e7a315aa152a9520bf66cef6ca3d23a Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 17:01:03 -0400 Subject: [PATCH 081/105] Adding Permissions section to readme --- README.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/README.md b/README.md index d11d34c..765aacf 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,29 @@ environment variable is set either in the terminal context or in the `.env` file A Hubspot access token is required to make API requests. (See [Hubspot API](https://developers.hubspot.com/docs/api/working-with-oauth) docs for more info) + +### Permissions + +The following scopes need to be added to your access token to access the following endpoints: + +Contacts: `crm.schemas.contacts.read` or `crm.objects.contacts.read` +Users: `settings.users.read` +Ticket Pipeline: `media_bridge.read` or `crm.schemas.custom.read` or `timeline` or `tickets` or `e-commerce` or `crm.objects.goals.read` +Deal Pipeline: `media_bridge.read` or `crm.schemas.custom.read` or `timeline` or `tickets` or `e-commerce` or `crm.objects.goals.read` +Properties: All of `Tickets`, `crm.objects.deals.read`, `sales-email-read`, `crm.objects.contacts.read`, `crm.objects.companies.read`, `e-commerce`, `crm.objects.quotes.read` +Owners: `crm.objects.owners.read` +Companies: `crm.objects.companies.read` +Deals: `crm.objects.deals.read` +Feedback Submissions: `crm.objects.contacts.read` +Line Items: `e-commerce` +Products: `e-commerce` +Tickets: `tickets` +Quotes: `crm.objects.quotes.read` or `crm.schemas.quotes.read` +Goals: `crm.objects.goals.read` +Emails: `sales-email-read` + +For more info on the streams and permissions, check the [Hubspot API Documentation](https://developers.hubspot.com/docs/api/overview). + ## Usage You can easily run `tap-hubspot` by itself or in a pipeline using [Meltano](https://meltano.com/). From 9b59dce6ccc6901b2e77965ce98f3ef8fe188c46 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 19:20:33 -0400 Subject: [PATCH 082/105] Update Pagination method --- tap_hubspot/client.py | 18 +++++++++++++++ tap_hubspot/streams.py | 50 +++++++++++++++++++++--------------------- 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/tap_hubspot/client.py b/tap_hubspot/client.py index 017b53e..d938c4b 100644 --- a/tap_hubspot/client.py +++ b/tap_hubspot/client.py @@ -81,6 +81,24 @@ def get_new_paginator(self) -> BaseAPIPaginator: """ return super().get_new_paginator() + def get_next_page_token( + self, + response: requests.Response, + previous_token: t.Any | None, + ) -> t.Any | None: + """Return a token for identifying next page or None if no more pages.""" + # If pagination is required, return a token which can be used to get the + # next page. If this is the final page, return "None" to end the + # pagination loop. 
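+        # A typical v3 list payload has the assumed shape: +        #   {"results": [...], "paging": {"next": {"after": "..."}}} +        # so the absence of a "paging" object marks the final page.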
+ resp_json = response.json() + paging = resp_json.get("paging") + + if paging is not None: + next_page_token = resp_json.get("paging", {}).get("next", {}).get("after") + else: + next_page_token = None + return next_page_token + def get_url_params( self, context: dict | None, diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 1362f84..62b9bb8 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -44,7 +44,7 @@ class ContactStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("vid", IntegerType), + Property("vid", StringType), Property("canonical-vid", IntegerType), Property("merged-vids", ArrayType(StringType)), Property("portal-id", IntegerType), @@ -132,7 +132,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -238,7 +238,7 @@ class UsersStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property("email", StringType), Property("roleIds", ArrayType(StringType)), Property("primaryteamid", StringType), @@ -268,7 +268,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -357,7 +357,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -467,7 +467,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -577,7 +577,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -668,7 +668,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -768,7 +768,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -1878,7 +1878,7 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: """ Merges all the property stream data into a single property table """ - + property_ticket = PropertyTicketStream(self._tap, schema={"properties": {}}) property_deal = PropertyDealStream(self._tap, schema={"properties": {}}) property_contact = PropertyContactStream(self._tap, schema={"properties": {}}) @@ -1983,7 +1983,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2080,7 +2080,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: 
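            # With a replication key configured, request ascending order on that
            # key (sort/order_by below) so incremental bookmarks only move forward.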
params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2178,7 +2178,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2275,7 +2275,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2372,7 +2372,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2468,7 +2468,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2565,7 +2565,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2661,7 +2661,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2761,7 +2761,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2855,7 +2855,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -2958,7 +2958,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -3059,7 +3059,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -3153,7 +3153,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -3245,7 +3245,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key @@ -3342,7 +3342,7 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key From 17c7f849ef7007aeed9b35d54ddc8701ff628b93 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 21:04:22 -0400 Subject: [PATCH 083/105] Update Property column types --- tap_hubspot/streams.py | 335 ++++++++++++++++++++++++++++++++++------- 1 file changed, 278 insertions(+), 57 
deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 62b9bb8..6564491 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -323,7 +323,7 @@ class OwnersStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property("email", StringType), Property("firstName", StringType), Property("lastName", StringType), @@ -413,14 +413,14 @@ class TicketPipelineStream(HubspotStream): schema = PropertiesList( Property("label", StringType), - Property("displayOrder", StringType), + Property("displayOrder", IntegerType), Property("active", BooleanType), Property( "stages", ArrayType( ObjectType( Property("label", StringType), - Property("displayOrder", StringType), + Property("displayOrder", IntegerType), Property( "metadata", ObjectType( @@ -523,14 +523,14 @@ class DealPipelineStream(HubspotStream): schema = PropertiesList( Property("label", StringType), - Property("displayOrder", StringType), + Property("displayOrder", IntegerType), Property("active", BooleanType), Property( "stages", ArrayType( ObjectType( Property("label", StringType), - Property("displayOrder", StringType), + Property("displayOrder", IntegerType), Property( "metadata", ObjectType( @@ -731,17 +731,34 @@ class PropertyTicketStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -840,18 +857,35 @@ class PropertyDealStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), Property("calculationFormula", 
StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -930,17 +964,34 @@ class PropertyContactStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1019,17 +1070,34 @@ class PropertyCompanyStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1108,17 +1176,34 @@ class PropertyProductStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ 
-1197,17 +1282,34 @@ class PropertyLineItemStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1286,17 +1388,34 @@ class PropertyEmailStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1375,17 +1494,34 @@ class PropertyPostalMailStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1464,17 +1600,34 @@ class PropertyCallStream(HubspotStream): Property("fieldType", 
StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1553,17 +1706,34 @@ class PropertyMeetingStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1642,17 +1812,34 @@ class PropertyTaskStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1731,17 +1918,34 @@ class PropertyCommunicationStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", 
StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property @@ -1820,17 +2024,34 @@ class PropertyNotesStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", StringType), - Property("displayOrder", StringType), + Property("options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), + Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), Property("hasUniqueValue", BooleanType), Property("hidden", BooleanType), Property("hubspotDefined", BooleanType), - Property("modificationMetadata", StringType), + Property( + "modificationMetadata", + ObjectType( + Property("readOnlyOptions", BooleanType), + Property("readOnlyValue", BooleanType), + Property("readOnlyDefinition", BooleanType), + Property("archivable", BooleanType), + ), + ), Property("formField", BooleanType), Property("hubspot_object", StringType), - Property("showCurrencySymbol", StringType), ).to_dict() @property From c4578d7a7c0d0902c693900d7cb341e70ac1cc7f Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 21:25:22 -0400 Subject: [PATCH 084/105] update types for v1 endpoints --- tap_hubspot/streams.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 6564491..1e532f5 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -44,7 +44,7 @@ class ContactStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("vid", StringType), + Property("vid", IntegerType), Property("canonical-vid", IntegerType), Property("merged-vids", ArrayType(StringType)), Property("portal-id", IntegerType), @@ -52,7 +52,7 @@ class ContactStream(HubspotStream): Property( "properties", ObjectType( - Property("lastmodifieddate", StringType), + Property("lastmodifieddate", IntegerType), Property("email", StringType), Property("message", StringType), Property("city", StringType), @@ -105,7 +105,7 @@ class ContactStream(HubspotStream): Property("form-submissions", ArrayType(StringType)), Property("identity-profiles", ArrayType(StringType)), Property("merge-audits", ArrayType(StringType)), - Property("addedAt", StringType), + Property("addedAt", IntegerType), ).to_dict() @property @@ -428,17 +428,17 @@ class TicketPipelineStream(HubspotStream): Property("isClosed", 
StringType), ), ), - Property("stageId", IntegerType), - Property("createdAt", StringType), + Property("stageId", StringType), + Property("createdAt", IntegerType), Property("updatedAt", StringType), - Property("active", StringType), + Property("active", BooleanType), ), ), ), Property("objectType", StringType), Property("objectTypeId", StringType), Property("pipelineId", StringType), - Property("createdAt", StringType), + Property("createdAt", IntegerType), Property("updatedAt", StringType), Property("default", BooleanType), ).to_dict() @@ -538,17 +538,17 @@ class DealPipelineStream(HubspotStream): Property("probability", StringType), ), ), - Property("stageId", IntegerType), - Property("createdAt", StringType), + Property("stageId", StringType), + Property("createdAt", IntegerType), Property("updatedAt", StringType), - Property("active", StringType), + Property("active", BooleanType), ), ), ), Property("objectType", StringType), Property("objectTypeId", StringType), Property("pipelineId", StringType), - Property("createdAt", StringType), + Property("createdAt", IntegerType), Property("updatedAt", StringType), Property("default", BooleanType), ).to_dict() @@ -641,7 +641,7 @@ class EmailSubscriptionStream(HubspotStream): Property("category", StringType), Property("channel", StringType), Property("internalName", StringType), - Property("businessUnitId", StringType), + Property("businessUnitId", IntegerType), ).to_dict() @property From cf9ce999c2c3c5af4cb3a0bc709091410b00a5e0 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 21:27:47 -0400 Subject: [PATCH 085/105] updated columns in v3 streams --- tap_hubspot/streams.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 1e532f5..71bbccf 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2637,7 +2637,7 @@ class TicketStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "ticket" @@ -2733,7 +2733,7 @@ class QuoteStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "quote" @@ -2830,7 +2830,7 @@ class GoalStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "goal" @@ -2926,7 +2926,7 @@ class CallStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "call" @@ -3026,7 +3026,7 @@ class CommunicationStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "communication" @@ -3120,7 +3120,7 @@ class EmailStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "email" @@ -3223,7 +3223,7 @@ class MeetingStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "meeting" @@ -3324,7 +3324,7 @@ class NoteStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "note" @@ -3418,7 +3418,7 @@ class PostalMailStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "postalmail" @@ -3510,7 +3510,7 @@ class TaskStream(HubspotStream): """ columns = """ - properties + id, properties, createdAt, updatedAt, archived """ name = "task" From 32d90589824073a690369a91d0c9c461c96a5c3b Mon Sep 17 00:00:00 2001 From: NeilGorman104 
Date: Mon, 3 Jul 2023 21:39:00 -0400 Subject: [PATCH 086/105] Update Contacts Stream to v3 --- tap_hubspot/streams.py | 127 ++++++----------------------------------- 1 file changed, 16 insertions(+), 111 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 71bbccf..115ff45 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -21,7 +21,7 @@ class ContactStream(HubspotStream): """ - https://legacydocs.hubspot.com/docs/methods/lists/get_lists + https://developers.hubspot.com/docs/api/crm/contacts """ """ @@ -38,74 +38,30 @@ class ContactStream(HubspotStream): """ name = "contact" - path = "/lists/all/contacts/all?fields={}".format(columns) - primary_keys = ["addedAt"] - replication_key = "addedAt" + path = "/objects/contacts" + primary_keys = ["id"] + replication_key = "updatedAt" replication_method = "incremental" schema = PropertiesList( - Property("vid", IntegerType), - Property("canonical-vid", IntegerType), - Property("merged-vids", ArrayType(StringType)), - Property("portal-id", IntegerType), - Property("is-contact", BooleanType), + Property("id", StringType), Property( "properties", ObjectType( - Property("lastmodifieddate", IntegerType), - Property("email", StringType), - Property("message", StringType), - Property("city", StringType), Property("company", StringType), - Property("createddate", StringType), + Property("createdate", StringType), + Property("email", StringType), Property("firstname", StringType), - Property("hs_all_contact_vids", IntegerType), - Property("hs_date_entered_lead", StringType), - Property("hs_marketable_reason_id", StringType), - Property("hs_is_unworked", BooleanType), - Property("hs_marketable_until_renewal", BooleanType), - Property("hs_latest_source_timestamp", StringType), - Property("hs_marketable_reason_type", StringType), - Property("hs_marketable_status", BooleanType), - Property("hs_is_contact", BooleanType), - Property("hs_email_domain", StringType), - Property("hs_pipeline", StringType), - Property("hs_sequences_actively_enrolled_count", StringType), - Property("hs_object_id", StringType), - Property("hs_time_in_lead", StringType), - Property("num_conversion_events", StringType), - Property("num_unique_conversion_events", StringType), + Property("lastmodifieddate", StringType), Property("lastname", StringType), - Property("hs_analytics_num_page_views", StringType), - Property("hs_analytics_num_event_completions", StringType), - Property("hs_analytics_first_timestamp", StringType), - Property("hs_social_twitter_clicks", StringType), - Property("hs_analytics_num_visits", StringType), - Property("twitterprofilephoto", StringType), - Property("twitterhandle", StringType), - Property("hs_analytics_source_data_2", StringType), - Property("hs_social_facebook_clicks", StringType), - Property("hs_analytics_source", StringType), - Property("hs_analytics_source_data_1", StringType), - Property("hs_latest_source", StringType), - Property("hs_latest_source_data_1", StringType), - Property("hs_latest_source_data_2", StringType), - Property("hs_social_google_plus_clicks", StringType), - Property("hs_social_num_broadcast_clicks", StringType), - Property("state", StringType), - Property("hs_social_linkedin_clicks", StringType), - Property("hs_lifecyclestage_lead_date", StringType), - Property("hs_analytics_revenue", StringType), - Property("hs_analytics_average_page_views", StringType), + Property("phone", StringType), Property("website", StringType), - Property("lifecyclestage", StringType), - Property("jobtitle", 
StringType), ), ), - Property("form-submissions", ArrayType(StringType)), - Property("identity-profiles", ArrayType(StringType)), - Property("merge-audits", ArrayType(StringType)), - Property("addedAt", IntegerType), + Property("createdAt", StringType), + Property("updatedAt", StringType), + Property("archived", BooleanType), + ).to_dict() @property @@ -113,7 +69,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/contacts/v1" + base_url = "https://api.hubapi.com/crm/v3" return base_url def get_url_params( @@ -137,57 +93,6 @@ def get_url_params( params["sort"] = "asc" params["order_by"] = self.replication_key - params["property"] = ( - "message", - "email", - "city", - "company", - "createddate", - "firstname", - "hs_all_contact_vids", - "hs_date_entered_lead", - "hs_marketable_reason_id", - "hs_is_unworked", - "hs_marketable_until_renewal", - "hs_latest_source_timestamp", - "hs_marketable_reason_type", - "hs_marketable_status", - "hs_is_contact", - "hs_email_domain", - "hs_pipeline", - "hs_sequences_actively_enrolled_count", - "hs_object_id", - "hs_time_in_lead", - "num_conversion_events", - "num_unique_conversion_events", - "lastname", - "hs_analytics_num_page_views", - "hs_analytics_num_event_completions", - "hs_analytics_first_timestamp", - "hs_social_twitter_clicks", - "hs_analytics_num_visits", - "twitterprofilephoto", - "twitterhandle", - "hs_analytics_source_data_2", - "hs_social_facebook_clicks", - "hs_analytics_source", - "hs_analytics_source_data_1", - "hs_latest_source", - "hs_latest_source_data_1", - "hs_latest_source_data_2", - "hs_social_google_plus_clicks", - "hs_social_num_broadcast_clicks", - "state", - "hs_social_linkedin_clicks", - "hs_lifecyclestage_lead_date", - "hs_analytics_revenue", - "hs_analytics_average_page_views", - "website", - "lifecyclestage", - "jobtitle", - ) - params["propertyMode"] = "value_and_history" - return params def parse_response(self, response: requests.Response) -> Iterable[dict]: @@ -204,8 +109,8 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: if isinstance(resp_json, list): results = resp_json - elif resp_json.get("contacts") is not None: - results = resp_json["contacts"] + elif resp_json.get("results") is not None: + results = resp_json["results"] else: results = resp_json From 27c34d922f8eb3f2faf5fdb081bfceee84cbbc0a Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 21:48:50 -0400 Subject: [PATCH 087/105] Change id types for v3 to String --- tap_hubspot/streams.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 115ff45..15824e3 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -2066,7 +2066,7 @@ class CompanyStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2163,7 +2163,7 @@ class DealStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2260,7 +2260,7 @@ class FeedbackSubmissionsStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2358,7 +2358,7 @@ class LineItemStream(HubspotStream): replication_method = 
"incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2455,7 +2455,7 @@ class ProductStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2552,7 +2552,7 @@ class TicketStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2648,7 +2648,7 @@ class QuoteStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2745,7 +2745,7 @@ class GoalStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2841,7 +2841,7 @@ class CallStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -2941,7 +2941,7 @@ class CommunicationStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -3035,7 +3035,7 @@ class EmailStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -3138,7 +3138,7 @@ class MeetingStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -3239,7 +3239,7 @@ class NoteStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -3333,7 +3333,7 @@ class PostalMailStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( @@ -3425,7 +3425,7 @@ class TaskStream(HubspotStream): replication_method = "incremental" schema = PropertiesList( - Property("id", IntegerType), + Property("id", StringType), Property( "properties", ObjectType( From af37e26edd4c3d58acc55977cbeef4260ad09d5b Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Mon, 3 Jul 2023 21:56:08 -0400 Subject: [PATCH 088/105] formatting --- tap_hubspot/streams.py | 300 +++++++++++++++++++++-------------------- 1 file changed, 156 insertions(+), 144 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 15824e3..631ff93 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -61,7 +61,6 @@ class ContactStream(HubspotStream): Property("createdAt", StringType), Property("updatedAt", StringType), Property("archived", BooleanType), - ).to_dict() @property @@ -636,17 +635,18 @@ class PropertyTicketStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + 
"options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -762,17 +762,18 @@ class PropertyDealStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -869,17 +870,18 @@ class PropertyContactStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -975,17 +977,18 @@ class PropertyCompanyStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1081,17 +1084,18 @@ class PropertyProductStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1187,17 +1191,18 @@ class 
PropertyLineItemStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1293,17 +1298,18 @@ class PropertyEmailStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1399,17 +1405,18 @@ class PropertyPostalMailStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1505,17 +1512,18 @@ class PropertyCallStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1611,17 +1619,18 @@ class PropertyMeetingStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + 
"options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1717,17 +1726,18 @@ class PropertyTaskStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1823,17 +1833,18 @@ class PropertyCommunicationStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), @@ -1929,17 +1940,18 @@ class PropertyNotesStream(HubspotStream): Property("fieldType", StringType), Property("description", StringType), Property("groupName", StringType), - Property("options", - ArrayType( - ObjectType( - Property("label", StringType), - Property("description", StringType), - Property("value", StringType), - Property("displayOrder", IntegerType), - Property("hidden", BooleanType), - ), - ), - ), + Property( + "options", + ArrayType( + ObjectType( + Property("label", StringType), + Property("description", StringType), + Property("value", StringType), + Property("displayOrder", IntegerType), + Property("hidden", BooleanType), + ), + ), + ), Property("displayOrder", IntegerType), Property("calculated", BooleanType), Property("externalOptions", BooleanType), From cbc414236a182994dafd1cc272a11719c479afec Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Tue, 4 Jul 2023 10:38:47 +0530 Subject: [PATCH 089/105] updated property stream --- tap_hubspot/streams.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 631ff93..3a1ec7a 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -623,6 +623,7 @@ class PropertyTicketStream(HubspotStream): name = "propertyticket" path = "/properties/tickets?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -750,6 +751,7 @@ class PropertyDealStream(HubspotStream): name = "propertydeal" path = "/properties/deals?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" 
replication_method = "incremental" @@ -858,6 +860,7 @@ class PropertyContactStream(HubspotStream): name = "propertycontact" path = "/properties/contacts?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -965,6 +968,7 @@ class PropertyCompanyStream(HubspotStream): name = "propertycompany" path = "/properties/company?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1072,6 +1076,7 @@ class PropertyProductStream(HubspotStream): name = "propertyproduct" path = "/properties/product?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1179,6 +1184,7 @@ class PropertyLineItemStream(HubspotStream): name = "propertylineitem" path = "/properties/line_item?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1286,6 +1292,7 @@ class PropertyEmailStream(HubspotStream): name = "propertyemail" path = "/properties/email?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1393,6 +1400,7 @@ class PropertyPostalMailStream(HubspotStream): name = "propertypostalmail" path = "/properties/postal_mail?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1500,6 +1508,7 @@ class PropertyCallStream(HubspotStream): name = "propertycall" path = "/properties/call?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1607,6 +1616,7 @@ class PropertyMeetingStream(HubspotStream): name = "propertymeeting" path = "/properties/meeting?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1714,6 +1724,7 @@ class PropertyTaskStream(HubspotStream): name = "propertytask" path = "/properties/task?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1821,6 +1832,7 @@ class PropertyCommunicationStream(HubspotStream): name = "propertycommunication" path = "/properties/task?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1928,6 +1940,7 @@ class PropertyNotesStream(HubspotStream): name = "property" path = "/properties/notes?fields={}".format(columns) + primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -3507,3 +3520,4 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results + From 89ddc7fe98ce1e46ecdec3e9cb819d86f260b7b4 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Tue, 4 Jul 2023 09:44:00 -0400 Subject: [PATCH 090/105] Add legacy endpoint section in readme, update permissions section format --- README.md | 41 ++++++++++++++++++++++++++--------------- tap_hubspot/streams.py | 1 - 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 765aacf..2a6131b 100644 --- a/README.md +++ b/README.md @@ -59,21 +59,21 @@ A Hubspot access token is required to make API requests. 
(See [Hubspot API](http The following scopes need to be added to your access token to access the following endpoints: -Contacts: `crm.schemas.contacts.read` or `crm.objects.contacts.read` -Users: `settings.users.read` -Ticket Pipeline: `media_bridge.read` or `crm.schemas.custom.read` or `timeline` or `tickets` or `e-commerce` or `crm.objects.goals.read` -Deal Pipeline: `media_bridge.read` or `crm.schemas.custom.read` or `timeline` or `tickets` or `e-commerce` or `crm.objects.goals.read` -Properties: All of `Tickets`, `crm.objects.deals.read`, `sales-email-read`, `crm.objects.contacts.read`, `crm.objects.companies.read`, `e-commerce`, `crm.objects.quotes.read` -Owners: `crm.objects.owners.read` -Companies: `crm.objects.companies.read` -Deals: `crm.objects.deals.read` -Feedback Submissions: `crm.objects.contacts.read` -Line Items: `e-commerce` -Products: `e-commerce` -Tickets: `tickets` -Quotes: `crm.objects.quotes.read` or `crm.schemas.quotes.read` -Goals: `crm.objects.goals.read` -Emails: `sales-email-read` +- Contacts: `crm.schemas.contacts.read` or `crm.objects.contacts.read` +- Users: `settings.users.read` +- Ticket Pipeline: `media_bridge.read` or `crm.schemas.custom.read` or `timeline` or `tickets` or `e-commerce` or `crm.objects.goals.read` +- Deal Pipeline: `media_bridge.read` or `crm.schemas.custom.read` or `timeline` or `tickets` or `e-commerce` or `crm.objects.goals.read` +- Properties: All of `Tickets`, `crm.objects.deals.read`, `sales-email-read`, `crm.objects.contacts.read`, `crm.objects.companies.read`, `e-commerce`, `crm.objects.quotes.read` +- Owners: `crm.objects.owners.read` +- Companies: `crm.objects.companies.read` +- Deals: `crm.objects.deals.read` +- Feedback Submissions: `crm.objects.contacts.read` +- Line Items: `e-commerce` +- Products: `e-commerce` +- Tickets: `tickets` +- Quotes: `crm.objects.quotes.read` or `crm.schemas.quotes.read` +- Goals: `crm.objects.goals.read` +- Emails: `sales-email-read` For more info on the streams and permissions, check the [Hubspot API Documentation](https://developers.hubspot.com/docs/api/overview). @@ -81,6 +81,17 @@ For more info on the streams and permissions, check the [Hubspot API Documentati You can easily run `tap-hubspot` by itself or in a pipeline using [Meltano](https://meltano.com/). + +### Streams Using v1 Endpoints + +The following Streams use the v1 (legacy) endpoint in the Hubspot API: + +1. [TicketPipeline & DealPipeline](https://legacydocs.hubspot.com/docs/methods/pipelines/pipelines_overview): The v3 endpoint requires a pipeline ID parameter to make calls to the API. Because of this, +you are limited to only pulling data for a single pipeline ID from v3, whereas the v1 API allows you to pull from all pipelines. +2. [EmailSubscriptions](https://legacydocs.hubspot.com/docs/methods/email/email_subscriptions_overview): This endpoint requires you to set a single email address to pull subscription data, whereas in +the v1 endpoint allows you to pull data from all emails. + + ## Stream Inheritance This project uses parent-child streams. Learn more about them [here](https://gitlab.com/meltano/sdk/-/blob/main/docs/parent_streams.md). 
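If it helps when wiring up a token, the granted scopes can be checked up front rather than diagnosed as 403s mid-sync. The sketch below is illustrative only: it assumes an OAuth access token and HubSpot's `/oauth/v1/access-tokens/{token}` introspection endpoint, and the required-scope set is just an example drawn from the Permissions table above.

import requests

# Example scope set taken from the Permissions table above; adjust per stream.
REQUIRED_SCOPES = {"crm.objects.contacts.read", "crm.objects.companies.read", "tickets"}

def missing_scopes(access_token: str) -> set[str]:
    """Return required scopes that the given token does not carry."""
    # HubSpot's introspection endpoint echoes back the scopes granted to a token.
    resp = requests.get(
        f"https://api.hubapi.com/oauth/v1/access-tokens/{access_token}",
        timeout=30,
    )
    resp.raise_for_status()
    granted = set(resp.json().get("scopes", []))
    return REQUIRED_SCOPES - granted

# Usage: fail fast before running a pipeline.
#   missing = missing_scopes(token)
#   if missing: raise SystemExit(f"token is missing scopes: {missing}")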
diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 3a1ec7a..80654da 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -3520,4 +3520,3 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: results = resp_json yield from results - From 8b9589903afaa0760d22c0c93a381653d252f25e Mon Sep 17 00:00:00 2001 From: Neil Gorman <106695118+NeilGorman104@users.noreply.github.com> Date: Tue, 4 Jul 2023 09:46:18 -0400 Subject: [PATCH 091/105] readme grammar fix --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a6131b..0e219b2 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ The following Streams use the v1 (legacy) endpoint in the Hubspot API: 1. [TicketPipeline & DealPipeline](https://legacydocs.hubspot.com/docs/methods/pipelines/pipelines_overview): The v3 endpoint requires a pipeline ID parameter to make calls to the API. Because of this, you are limited to only pulling data for a single pipeline ID from v3, whereas the v1 API allows you to pull from all pipelines. -2. [EmailSubscriptions](https://legacydocs.hubspot.com/docs/methods/email/email_subscriptions_overview): This endpoint requires you to set a single email address to pull subscription data, whereas in +2. [EmailSubscriptions](https://legacydocs.hubspot.com/docs/methods/email/email_subscriptions_overview): The v3 endpoint requires you to set a single email address to pull subscription data, whereas the v1 endpoint allows you to pull data from all emails. From d6146be6b98a5c9f5df35ac318c8a8c425cd6b38 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Tue, 4 Jul 2023 09:50:02 -0400 Subject: [PATCH 092/105] remove access token value in meltano.yml --- meltano.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/meltano.yml b/meltano.yml index 00329fe..d5b1801 100644 --- a/meltano.yml +++ b/meltano.yml @@ -15,7 +15,6 @@ plugins: - stream-maps settings: - name: access_token - value: ${TAP_HUBSPOT_ACCESS_TOKEN} kind: password - name: start_date value: '2023-01-01T00:00:00Z' From 0f37ef71d169e06e01479e6867a9a7493a47af8f Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 5 Jul 2023 11:40:44 -0400 Subject: [PATCH 093/105] update stream table names to plural --- tap_hubspot/streams.py | 54 +++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 80654da..5c2307d 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -37,7 +37,7 @@ class ContactStream(HubspotStream): vid, canonical-vid, merged-vids, portal-id, is-contact, properties """ - name = "contact" + name = "contacts" path = "/objects/contacts" primary_keys = ["id"] replication_key = "updatedAt" @@ -309,7 +309,7 @@ class TicketPipelineStream(HubspotStream): label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default """ - name = "ticketpipeline" + name = "ticketpipelines" path = "/pipelines/tickets?fields={}".format(columns) primary_keys = ["createdAt"] replication_key = "createdAt" @@ -419,7 +419,7 @@ class DealPipelineStream(HubspotStream): label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default """ - name = "dealpipeline" + name = "dealpipelines" path = "/pipelines/deals?fields={}".format(columns) primary_keys = ["createdAt"] replication_key = "createdAt" @@ -529,7 +529,7 @@ class EmailSubscriptionStream(HubspotStream): id, portalId, name, description, active, 
internal, category, channel, internalName, businessUnitId """ - name = "emailsubscription" + name = "emailsubscriptions" path = "/subscriptions/?fields={}".format(columns) primary_keys = ["id"] replication_key = "id" @@ -621,7 +621,7 @@ class PropertyTicketStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertyticket" + name = "propertytickets" path = "/properties/tickets?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -749,7 +749,7 @@ class PropertyDealStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertydeal" + name = "propertydeals" path = "/properties/deals?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -858,7 +858,7 @@ class PropertyContactStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertycontact" + name = "propertycontacts" path = "/properties/contacts?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -966,7 +966,7 @@ class PropertyCompanyStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertycompany" + name = "propertycompanies" path = "/properties/company?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1074,7 +1074,7 @@ class PropertyProductStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertyproduct" + name = "propertyproducts" path = "/properties/product?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1182,7 +1182,7 @@ class PropertyLineItemStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertylineitem" + name = "propertylineitems" path = "/properties/line_item?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1290,7 +1290,7 @@ class PropertyEmailStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertyemail" + name = "propertyemails" path = "/properties/email?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1398,7 +1398,7 @@ class PropertyPostalMailStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertypostalmail" + name = "propertypostalmails" path = "/properties/postal_mail?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1506,7 +1506,7 @@ class PropertyCallStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertycall" + name = "propertycalls" path = "/properties/call?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1614,7 +1614,7 @@ class PropertyMeetingStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertymeeting" + name = "propertymeetings" path = "/properties/meeting?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" 
@@ -1722,7 +1722,7 @@ class PropertyTaskStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertytask" + name = "propertytasks" path = "/properties/task?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1830,7 +1830,7 @@ class PropertyCommunicationStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "propertycommunication" + name = "propertycommunications" path = "/properties/task?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -1938,7 +1938,7 @@ class PropertyNotesStream(HubspotStream): calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField """ - name = "property" + name = "properties" path = "/properties/notes?fields={}".format(columns) primary_keys = ["label"] replication_key = "updatedAt" @@ -2473,7 +2473,7 @@ class ProductStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "product" + name = "products" path = "/objects/products" primary_keys = ["id"] replication_key = "updatedAt" @@ -2570,7 +2570,7 @@ class TicketStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "ticket" + name = "tickets" path = "/objects/tickets" primary_keys = ["id"] replication_key = "updatedAt" @@ -2666,7 +2666,7 @@ class QuoteStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "quote" + name = "quotes" path = "/objects/quotes" primary_keys = ["id"] replication_key = "updatedAt" @@ -2763,7 +2763,7 @@ class GoalStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "goal" + name = "goals" path = "/objects/goal_targets" primary_keys = ["id"] replication_key = "updatedAt" @@ -2859,7 +2859,7 @@ class CallStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "call" + name = "calls" path = "/objects/calls" primary_keys = ["id"] replication_key = "updatedAt" @@ -2959,7 +2959,7 @@ class CommunicationStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "communication" + name = "communications" path = "/objects/Communications" primary_keys = ["id"] replication_key = "updatedAt" @@ -3053,7 +3053,7 @@ class EmailStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "email" + name = "emails" path = "/objects/emails" primary_keys = ["id"] replication_key = "updatedAt" @@ -3156,7 +3156,7 @@ class MeetingStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "meeting" + name = "meetings" path = "/objects/meetings" primary_keys = ["id"] replication_key = "updatedAt" @@ -3257,7 +3257,7 @@ class NoteStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "note" + name = "notes" path = "/objects/notes" primary_keys = ["id"] replication_key = "updatedAt" @@ -3443,7 +3443,7 @@ class TaskStream(HubspotStream): id, properties, createdAt, updatedAt, archived """ - name = "task" + name = "tasks" path = "/objects/tasks" primary_keys = ["id"] replication_key = "updatedAt" From 98172718312653bad914b9f10dbcd48a4d2e94c8 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 5 Jul 2023 12:02:22 -0400 Subject: [PATCH 094/105] remove redundant columns variable from streams --- tap_hubspot/streams.py | 198 +++++------------------------------------ 1 file changed, 20 insertions(+), 178 deletions(-) 
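In the diff that follows, every stream drops its `columns` attribute and the `?fields={}` query string baked into `path`; the bare path plus the shared `get_url_params` hook now carry the request. A minimal sketch of the resulting stream shape, using names taken from the diff (the `tap_hubspot.client` import is an assumption about the module layout):

    from tap_hubspot.client import HubspotStream  # assumed location of the base class

    class OwnersStream(HubspotStream):
        """Owners stream after the cleanup: no redundant `columns` variable."""

        name = "owners"
        path = "/owners"  # fields are no longer interpolated into the URL
        primary_keys = ["id"]
        replication_key = "updatedAt"
        replication_method = "incremental"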
diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 5c2307d..d4087e7 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -25,7 +25,6 @@ class ContactStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -33,10 +32,6 @@ class ContactStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - vid, canonical-vid, merged-vids, portal-id, is-contact, properties - """ - name = "contacts" path = "/objects/contacts" primary_keys = ["id"] @@ -123,7 +118,6 @@ class UsersStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -131,12 +125,8 @@ class UsersStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, email, roleIds, primaryteamid - """ - name = "users" - path = "/users?fields={}".format(columns) + path = "/users" primary_keys = ["id"] replication_key = "id" replication_method = "incremental" @@ -208,7 +198,6 @@ class OwnersStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -216,12 +205,10 @@ class OwnersStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, email, firstName, lastName, userId, createdAt, updatedAt, archived - """ + name = "owners" - path = "/owners?fields={}".format(columns) + path = "/owners" primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" @@ -297,7 +284,6 @@ class TicketPipelineStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -305,12 +291,9 @@ class TicketPipelineStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default - """ name = "ticketpipelines" - path = "/pipelines/tickets?fields={}".format(columns) + path = "/pipelines/tickets" primary_keys = ["createdAt"] replication_key = "createdAt" replication_method = "incremental" @@ -407,7 +390,6 @@ class DealPipelineStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -415,12 +397,9 @@ class DealPipelineStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - label, displayOrder, active, stages, objectType, objectTypeId, pipelineId, createdAt, updatedAt, default - """ name = "dealpipelines" - path = "/pipelines/deals?fields={}".format(columns) + path = "/pipelines/deals" primary_keys = ["createdAt"] replication_key = "createdAt" replication_method = "incremental" @@ -517,7 +496,6 @@ class EmailSubscriptionStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -525,12 +503,9 @@ class EmailSubscriptionStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, portalId, name, description, 
active, internal, category, channel, internalName, businessUnitId - """ name = "emailsubscriptions" - path = "/subscriptions/?fields={}".format(columns) + path = "/subscriptions" primary_keys = ["id"] replication_key = "id" replication_method = "incremental" @@ -608,7 +583,6 @@ class PropertyTicketStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -616,13 +590,9 @@ class PropertyTicketStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertytickets" - path = "/properties/tickets?fields={}".format(columns) + path = "/properties/tickets" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -736,7 +706,6 @@ class PropertyDealStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -744,13 +713,9 @@ class PropertyDealStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertydeals" - path = "/properties/deals?fields={}".format(columns) + path = "/properties/deals" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -845,7 +810,6 @@ class PropertyContactStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -853,13 +817,9 @@ class PropertyContactStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertycontacts" - path = "/properties/contacts?fields={}".format(columns) + path = "/properties/contacts" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -953,7 +913,6 @@ class PropertyCompanyStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -961,13 +920,9 @@ class PropertyCompanyStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertycompanies" - path = "/properties/company?fields={}".format(columns) + path = "/properties/company" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1061,7 +1016,6 @@ class PropertyProductStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will 
be added to api url in client.py schema: instream schema @@ -1069,13 +1023,9 @@ class PropertyProductStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertyproducts" - path = "/properties/product?fields={}".format(columns) + path = "/properties/product" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1169,7 +1119,6 @@ class PropertyLineItemStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1177,13 +1126,9 @@ class PropertyLineItemStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertylineitems" - path = "/properties/line_item?fields={}".format(columns) + path = "/properties/line_item" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1277,7 +1222,6 @@ class PropertyEmailStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1285,13 +1229,9 @@ class PropertyEmailStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertyemails" - path = "/properties/email?fields={}".format(columns) + path = "/properties/email" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1385,7 +1325,6 @@ class PropertyPostalMailStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1393,13 +1332,9 @@ class PropertyPostalMailStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertypostalmails" - path = "/properties/postal_mail?fields={}".format(columns) + path = "/properties/postal_mail" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1493,7 +1428,6 @@ class PropertyCallStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1501,13 +1435,9 @@ class PropertyCallStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ 
name = "propertycalls" - path = "/properties/call?fields={}".format(columns) + path = "/properties/call" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1601,7 +1531,6 @@ class PropertyMeetingStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1609,13 +1538,9 @@ class PropertyMeetingStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertymeetings" - path = "/properties/meeting?fields={}".format(columns) + path = "/properties/meeting" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1709,7 +1634,6 @@ class PropertyTaskStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1717,13 +1641,9 @@ class PropertyTaskStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "propertytasks" - path = "/properties/task?fields={}".format(columns) + path = "/properties/task" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1817,7 +1737,6 @@ class PropertyCommunicationStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1825,13 +1744,10 @@ class PropertyCommunicationStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ + name = "propertycommunications" - path = "/properties/task?fields={}".format(columns) + path = "/properties/task" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -1925,7 +1841,6 @@ class PropertyNotesStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -1933,13 +1848,9 @@ class PropertyNotesStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - updatedAt, createdAt, name, label, type, fieldType, description, groupName, options, displayOrder, - calculated, externalOptions, hasUniqueValue, hidden, hubspotDefined, modificationMetadata, formField - """ name = "properties" - path = "/properties/notes?fields={}".format(columns) + path = "/properties/notes" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" @@ -2072,7 +1983,6 @@ class CompanyStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema 
@@ -2080,9 +1990,6 @@ class CompanyStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ name = "companies" path = "/objects/companies" @@ -2169,7 +2076,6 @@ class DealStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2177,10 +2083,6 @@ class DealStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "deals" path = "/objects/deals" primary_keys = ["id"] @@ -2266,7 +2168,6 @@ class FeedbackSubmissionsStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2274,10 +2175,6 @@ class FeedbackSubmissionsStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "feedbacksubmissions" path = "/objects/feedback_submissions" primary_keys = ["id"] @@ -2364,7 +2261,6 @@ class LineItemStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2372,10 +2268,6 @@ class LineItemStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "lineitems" path = "/objects/line_items" primary_keys = ["id"] @@ -2461,7 +2353,6 @@ class ProductStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2469,10 +2360,6 @@ class ProductStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "products" path = "/objects/products" primary_keys = ["id"] @@ -2558,7 +2445,6 @@ class TicketStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2566,10 +2452,6 @@ class TicketStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "tickets" path = "/objects/tickets" primary_keys = ["id"] @@ -2654,7 +2536,6 @@ class QuoteStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2662,10 +2543,6 @@ class QuoteStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "quotes" path = "/objects/quotes" primary_keys = ["id"] @@ -2751,7 +2628,6 @@ class GoalStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2759,10 +2635,6 @@ class GoalStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = 
"goals" path = "/objects/goal_targets" primary_keys = ["id"] @@ -2847,7 +2719,6 @@ class CallStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2855,10 +2726,6 @@ class CallStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "calls" path = "/objects/calls" primary_keys = ["id"] @@ -2947,7 +2814,6 @@ class CommunicationStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -2955,9 +2821,6 @@ class CommunicationStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ name = "communications" path = "/objects/Communications" @@ -3041,7 +2904,6 @@ class EmailStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -3049,9 +2911,6 @@ class EmailStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ name = "emails" path = "/objects/emails" @@ -3144,7 +3003,6 @@ class MeetingStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -3152,9 +3010,6 @@ class MeetingStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ name = "meetings" path = "/objects/meetings" @@ -3245,7 +3100,6 @@ class NoteStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -3253,9 +3107,6 @@ class NoteStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ name = "notes" path = "/objects/notes" @@ -3339,7 +3190,6 @@ class PostalMailStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -3347,10 +3197,6 @@ class PostalMailStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ - name = "postalmail" path = "/objects/postal_mail" primary_keys = ["id"] @@ -3431,7 +3277,6 @@ class TaskStream(HubspotStream): """ """ - columns: columns which will be added to fields parameter in api name: stream name path: path which will be added to api url in client.py schema: instream schema @@ -3439,9 +3284,6 @@ class TaskStream(HubspotStream): replication_key = datetime keys for replication """ - columns = """ - id, properties, createdAt, updatedAt, archived - """ name = "tasks" path = "/objects/tasks" From f03962bd394be09c2ad6009892785a29957baf9e Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 5 Jul 2023 12:08:16 -0400 Subject: [PATCH 095/105] hotfix: property communications path --- tap_hubspot/streams.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index d4087e7..3fd113a 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -1747,7 +1747,7 @@ class PropertyCommunicationStream(HubspotStream): name = "propertycommunications" - path = "/properties/task" + path = "/properties/communication" primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" From fa11d9f5a287c4aefd3f00aa6e6cd8e3c56df564 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 5 Jul 2023 12:10:13 -0400 Subject: [PATCH 096/105] remove whitespace --- tap_hubspot/streams.py | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 3fd113a..59bec2e 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -205,8 +205,6 @@ class OwnersStream(HubspotStream): replication_key = datetime keys for replication """ - - name = "owners" path = "/owners" primary_keys = ["id"] @@ -291,7 +289,6 @@ class TicketPipelineStream(HubspotStream): replication_key = datetime keys for replication """ - name = "ticketpipelines" path = "/pipelines/tickets" primary_keys = ["createdAt"] @@ -397,7 +394,6 @@ class DealPipelineStream(HubspotStream): replication_key = datetime keys for replication """ - name = "dealpipelines" path = "/pipelines/deals" primary_keys = ["createdAt"] @@ -503,7 +499,6 @@ class EmailSubscriptionStream(HubspotStream): replication_key = datetime keys for replication """ - name = "emailsubscriptions" path = "/subscriptions" primary_keys = ["id"] @@ -590,7 +585,6 @@ class PropertyTicketStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertytickets" path = "/properties/tickets" primary_keys = ["label"] @@ -713,7 +707,6 @@ class PropertyDealStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertydeals" path = "/properties/deals" primary_keys = ["label"] @@ -817,7 +810,6 @@ class PropertyContactStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertycontacts" path = "/properties/contacts" primary_keys = ["label"] @@ -920,7 +912,6 @@ class PropertyCompanyStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertycompanies" path = "/properties/company" primary_keys = ["label"] @@ -1023,7 +1014,6 @@ class PropertyProductStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertyproducts" path = "/properties/product" primary_keys = ["label"] @@ -1126,7 +1116,6 @@ class PropertyLineItemStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertylineitems" path = "/properties/line_item" primary_keys = ["label"] @@ -1229,7 +1218,6 @@ class PropertyEmailStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertyemails" path = "/properties/email" primary_keys = ["label"] @@ -1332,7 +1320,6 @@ class PropertyPostalMailStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertypostalmails" path = "/properties/postal_mail" primary_keys = ["label"] @@ -1435,7 +1422,6 @@ class PropertyCallStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertycalls" path = "/properties/call" primary_keys = ["label"] @@ -1538,7 +1524,6 @@ class PropertyMeetingStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertymeetings" path = "/properties/meeting" primary_keys = 
["label"] @@ -1641,7 +1626,6 @@ class PropertyTaskStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertytasks" path = "/properties/task" primary_keys = ["label"] @@ -1744,8 +1728,6 @@ class PropertyCommunicationStream(HubspotStream): replication_key = datetime keys for replication """ - - name = "propertycommunications" path = "/properties/communication" primary_keys = ["label"] @@ -1848,7 +1830,6 @@ class PropertyNotesStream(HubspotStream): replication_key = datetime keys for replication """ - name = "properties" path = "/properties/notes" primary_keys = ["label"] @@ -1990,7 +1971,6 @@ class CompanyStream(HubspotStream): replication_key = datetime keys for replication """ - name = "companies" path = "/objects/companies" primary_keys = ["id"] @@ -2821,7 +2801,6 @@ class CommunicationStream(HubspotStream): replication_key = datetime keys for replication """ - name = "communications" path = "/objects/Communications" primary_keys = ["id"] @@ -2911,7 +2890,6 @@ class EmailStream(HubspotStream): replication_key = datetime keys for replication """ - name = "emails" path = "/objects/emails" primary_keys = ["id"] @@ -3010,7 +2988,6 @@ class MeetingStream(HubspotStream): replication_key = datetime keys for replication """ - name = "meetings" path = "/objects/meetings" primary_keys = ["id"] @@ -3107,7 +3084,6 @@ class NoteStream(HubspotStream): replication_key = datetime keys for replication """ - name = "notes" path = "/objects/notes" primary_keys = ["id"] @@ -3284,7 +3260,6 @@ class TaskStream(HubspotStream): replication_key = datetime keys for replication """ - name = "tasks" path = "/objects/tasks" primary_keys = ["id"] From 8b1222781d48317be95c4b2e9d5288aa5290d795 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 5 Jul 2023 12:14:07 -0400 Subject: [PATCH 097/105] gitignore loader files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 650746d..41ba0e8 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,7 @@ share/python-wheels/ .idea .meltano MANIFEST +target-* # PyInstaller # Usually these files are written by a python script from a template From f59bda131e068162f4608918e5be67dca26b17e3 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Wed, 5 Jul 2023 16:27:30 -0400 Subject: [PATCH 098/105] update table names to snake case --- tap_hubspot/streams.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 59bec2e..db0ed2d 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -289,7 +289,7 @@ class TicketPipelineStream(HubspotStream): replication_key = datetime keys for replication """ - name = "ticketpipelines" + name = "ticket_pipelines" path = "/pipelines/tickets" primary_keys = ["createdAt"] replication_key = "createdAt" @@ -394,7 +394,7 @@ class DealPipelineStream(HubspotStream): replication_key = datetime keys for replication """ - name = "dealpipelines" + name = "deal_pipelines" path = "/pipelines/deals" primary_keys = ["createdAt"] replication_key = "createdAt" @@ -499,7 +499,7 @@ class EmailSubscriptionStream(HubspotStream): replication_key = datetime keys for replication """ - name = "emailsubscriptions" + name = "email_subscriptions" path = "/subscriptions" primary_keys = ["id"] replication_key = "id" @@ -585,7 +585,7 @@ class PropertyTicketStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertytickets" + name = "property_tickets" path = 
"/properties/tickets" primary_keys = ["label"] replication_key = "updatedAt" @@ -707,7 +707,7 @@ class PropertyDealStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertydeals" + name = "property_deals" path = "/properties/deals" primary_keys = ["label"] replication_key = "updatedAt" @@ -810,7 +810,7 @@ class PropertyContactStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertycontacts" + name = "property_contacts" path = "/properties/contacts" primary_keys = ["label"] replication_key = "updatedAt" @@ -912,7 +912,7 @@ class PropertyCompanyStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertycompanies" + name = "property_companies" path = "/properties/company" primary_keys = ["label"] replication_key = "updatedAt" @@ -1014,7 +1014,7 @@ class PropertyProductStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertyproducts" + name = "property_products" path = "/properties/product" primary_keys = ["label"] replication_key = "updatedAt" @@ -1116,7 +1116,7 @@ class PropertyLineItemStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertylineitems" + name = "property_line_items" path = "/properties/line_item" primary_keys = ["label"] replication_key = "updatedAt" @@ -1218,7 +1218,7 @@ class PropertyEmailStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertyemails" + name = "property_emails" path = "/properties/email" primary_keys = ["label"] replication_key = "updatedAt" @@ -1320,7 +1320,7 @@ class PropertyPostalMailStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertypostalmails" + name = "property_postal_mails" path = "/properties/postal_mail" primary_keys = ["label"] replication_key = "updatedAt" @@ -1422,7 +1422,7 @@ class PropertyCallStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertycalls" + name = "property_calls" path = "/properties/call" primary_keys = ["label"] replication_key = "updatedAt" @@ -1524,7 +1524,7 @@ class PropertyMeetingStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertymeetings" + name = "property_meetings" path = "/properties/meeting" primary_keys = ["label"] replication_key = "updatedAt" @@ -1626,7 +1626,7 @@ class PropertyTaskStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertytasks" + name = "property_tasks" path = "/properties/task" primary_keys = ["label"] replication_key = "updatedAt" @@ -1728,7 +1728,7 @@ class PropertyCommunicationStream(HubspotStream): replication_key = datetime keys for replication """ - name = "propertycommunications" + name = "property_communications" path = "/properties/communication" primary_keys = ["label"] replication_key = "updatedAt" @@ -2155,7 +2155,7 @@ class FeedbackSubmissionsStream(HubspotStream): replication_key = datetime keys for replication """ - name = "feedbacksubmissions" + name = "feedback_submissions" path = "/objects/feedback_submissions" primary_keys = ["id"] replication_key = "updatedAt" @@ -2248,7 +2248,7 @@ class LineItemStream(HubspotStream): replication_key = datetime keys for replication """ - name = "lineitems" + name = "line_items" path = "/objects/line_items" primary_keys = ["id"] replication_key = "updatedAt" @@ -3173,7 +3173,7 @@ class PostalMailStream(HubspotStream): replication_key = datetime keys for replication """ - name 
= "postalmail" + name = "postal_mail" path = "/objects/postal_mail" primary_keys = ["id"] replication_key = "updatedAt" From a7f2deee5527701477b1eaba2120d13dbd0aa811 Mon Sep 17 00:00:00 2001 From: Dhrruv Date: Thu, 6 Jul 2023 14:31:07 +0530 Subject: [PATCH 099/105] added records jsonpath --- tap_hubspot/streams.py | 741 +++-------------------------------------- tap_hubspot/tap.py | 9 +- 2 files changed, 55 insertions(+), 695 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index db0ed2d..543b64f 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -89,26 +89,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class UsersStream(HubspotStream): @@ -169,26 +150,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class OwnersStream(HubspotStream): @@ -253,26 +215,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class TicketPipelineStream(HubspotStream): @@ -358,26 +301,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class DealPipelineStream(HubspotStream): @@ -463,26 +387,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. 
- """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class EmailSubscriptionStream(HubspotStream): @@ -549,26 +454,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("subscriptionDefinitions") is not None: - results = resp_json["subscriptionDefinitions"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[subscriptionDefinitions][*]" # Or override `parse_response`. class PropertyTicketStream(HubspotStream): @@ -662,27 +548,6 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -691,6 +556,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: row["hubspot_object"] = "ticket" return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyDealStream(HubspotStream): @@ -762,27 +629,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -794,6 +640,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyContactStream(HubspotStream): @@ -864,27 +712,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. 
- """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -896,6 +723,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyCompanyStream(HubspotStream): @@ -966,27 +795,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -998,6 +806,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyProductStream(HubspotStream): @@ -1068,27 +878,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1100,6 +889,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyLineItemStream(HubspotStream): @@ -1170,27 +961,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1202,6 +972,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. 
class PropertyEmailStream(HubspotStream): @@ -1272,27 +1044,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1304,6 +1055,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyPostalMailStream(HubspotStream): @@ -1374,27 +1127,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1406,6 +1138,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyCallStream(HubspotStream): @@ -1476,27 +1210,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1508,6 +1221,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyMeetingStream(HubspotStream): @@ -1578,27 +1293,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. 
- """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1610,6 +1304,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyTaskStream(HubspotStream): @@ -1680,27 +1376,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1712,6 +1387,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyCommunicationStream(HubspotStream): @@ -1782,27 +1459,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1814,6 +1470,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PropertyNotesStream(HubspotStream): @@ -1884,27 +1542,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results - def post_process(self, row: dict, context: dict | None = None) -> dict | None: """ Returns api records with added columns @@ -1916,6 +1553,8 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) + + records_jsonpath = "$[results][*]" # Or override `parse_response`. 
def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: """ @@ -2028,26 +1667,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class DealStream(HubspotStream): @@ -2120,26 +1740,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class FeedbackSubmissionsStream(HubspotStream): @@ -2213,26 +1814,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class LineItemStream(HubspotStream): @@ -2305,26 +1887,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class ProductStream(HubspotStream): @@ -2397,26 +1960,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class TicketStream(HubspotStream): @@ -2488,26 +2032,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. 
- - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class QuoteStream(HubspotStream): @@ -2580,26 +2105,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class GoalStream(HubspotStream): @@ -2671,26 +2177,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class CallStream(HubspotStream): @@ -2766,26 +2253,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class CommunicationStream(HubspotStream): @@ -2855,26 +2323,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class EmailStream(HubspotStream): @@ -2953,26 +2402,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. 
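Note that each stream keeps its `get_url_params` hook (the `return params` just before each new `records_jsonpath` line); only the response parsing becomes declarative. A hedged sketch of what such a hook typically returns for HubSpot's CRM v3 list endpoints — the patch does not show the full bodies, and `limit`/`after` here are HubSpot's standard page-size and paging-cursor query parameters:

class SketchStream:
    # Hypothetical stand-in for one of the stream classes above.
    def get_url_params(self, context, next_page_token):
        params = {"limit": 100}  # page size accepted by the CRM v3 list endpoints
        if next_page_token:
            params["after"] = next_page_token  # cursor from the previous page's paging block
        return params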
class MeetingStream(HubspotStream): @@ -3049,26 +2479,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class NoteStream(HubspotStream): @@ -3138,26 +2549,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class PostalMailStream(HubspotStream): @@ -3225,26 +2617,7 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. class TaskStream(HubspotStream): @@ -3317,23 +2690,5 @@ def get_url_params( return params - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result records. - - Args: - response: The HTTP ``requests.Response`` object. - - Yields: - Each record from the source. - """ - - resp_json = response.json() - - if isinstance(resp_json, list): - results = resp_json - elif resp_json.get("results") is not None: - results = resp_json["results"] - else: - results = resp_json - - yield from results + records_jsonpath = "$[results][*]" # Or override `parse_response`. 
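With the last of the `TaskStream` boilerplate gone, every stream in the file follows the same shape. A condensed sketch of that post-refactor pattern, assuming the SDK's `RESTStream` base; the class name, path, and schema below are illustrative rather than taken from the patch:

from singer_sdk import typing as th
from singer_sdk.streams import RESTStream


class ExampleStream(RESTStream):
    """Illustrative stream showing the post-refactor shape."""

    name = "example"
    path = "/objects/example"
    url_base = "https://api.hubapi.com/crm/v3"  # matches the url_base used throughout the patch
    primary_keys = ["id"]
    records_jsonpath = "$[results][*]"  # the SDK's default parse_response walks this
    schema = th.PropertiesList(th.Property("id", th.StringType)).to_dict()

One declarative line now replaces each removed `parse_response` body, and streams that need row-level tweaks still hook in via `post_process`, as the retained hunks earlier in the diff show.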
+ diff --git a/tap_hubspot/tap.py b/tap_hubspot/tap.py index 86cfb2d..1a661ea 100644 --- a/tap_hubspot/tap.py +++ b/tap_hubspot/tap.py @@ -18,12 +18,17 @@ class TapHubspot(Tap): "access_token", th.StringType, required=True, - description="The token to authenticate against the API service", + description="Token to authenticate against the API service", ), th.Property( "start_date", th.DateTimeType, - description="The earliest record date to sync", + description="Earliest record date to sync", + ), + th.Property( + "end_date", + th.DateTimeType, + description="Latest record date to sync", ), ).to_dict() From 6f917cb48c5ee80cc9027d4358b288ff4255be43 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 6 Jul 2023 12:27:10 -0400 Subject: [PATCH 100/105] Sync poetry.lock file --- poetry.lock | 1283 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1283 insertions(+) create mode 100644 poetry.lock diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..b5df3c3 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1283 @@ +# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +category = "main" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "boto3" +version = "1.27.0" +description = "The AWS SDK for Python" +category = "main" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "boto3-1.27.0-py3-none-any.whl", hash = "sha256:e1e535e9fb23977252f13652ed2fa9b4f2d59a53b04a5f2fad3ee415b6a3b2b0"}, + {file = "boto3-1.27.0.tar.gz", hash = "sha256:908f9c277325d68963dfcfce963a05336f0eb19505fc239c0ab9d01f4cba0296"}, +] + +[package.dependencies] +botocore = ">=1.30.0,<1.31.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = 
">=0.6.0,<0.7.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.30.0" +description = "Low-level, data-driven core of boto 3." +category = "main" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "botocore-1.30.0-py3-none-any.whl", hash = "sha256:cac1333f41ec98e6f75bbba3f2c74b9e76aa3847469ecea6e7773a0af0049bee"}, + {file = "botocore-1.30.0.tar.gz", hash = "sha256:b9cb5b78a289f0615a48d85066f01869029aa41b95993f2c0c55003df037c23f"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = ">=1.25.4,<1.27" + +[package.extras] +crt = ["awscrt (==0.16.9)"] + +[[package]] +name = "cached-property" +version = "1.5.2" +description = "A decorator for caching properties in classes." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "cached-property-1.5.2.tar.gz", hash = "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130"}, + {file = "cached_property-1.5.2-py2.py3-none-any.whl", hash = "sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0"}, +] + +[[package]] +name = "certifi" +version = "2023.5.7" +description = "Python package for providing Mozilla's CA Bundle." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, + {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, +] + +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = 
"sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.1.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +category = "main" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = 
"charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + 
{file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, +] + +[[package]] +name = "click" +version = "8.1.3" +description = "Composable command line interface toolkit" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == 
\"Windows\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "cryptography" +version = "40.0.2" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b"}, + {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b"}, + {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"}, + {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c"}, + {file = "cryptography-40.0.2-cp36-abi3-win32.whl", hash = "sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9"}, + {file = "cryptography-40.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404"}, + {file = "cryptography-40.0.2.tar.gz", hash = "sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99"}, +] + +[package.dependencies] +cffi = ">=1.12" + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +pep8test = ["black", "check-manifest", "mypy", "ruff"] +sdist = ["setuptools-rust (>=0.11.4)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] +tox = ["tox"] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +category = "main" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.1.2" +description = "Backport of PEP 654 (exception groups)" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.2-py3-none-any.whl", hash = "sha256:e346e69d186172ca7cf029c8c1d16235aa0e04035e5750b4b95039e65204328f"}, + {file = "exceptiongroup-1.1.2.tar.gz", hash = "sha256:12c3e887d6485d16943a309616de20ae5582633e0a2eda17f4e10fd61c1e8af5"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "fs" +version = "2.4.16" +description = "Python's filesystem abstraction layer" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "fs-2.4.16-py2.py3-none-any.whl", hash = "sha256:660064febbccda264ae0b6bace80a8d1be9e089e0a5eb2427b7d517f9a91545c"}, + {file = "fs-2.4.16.tar.gz", hash = "sha256:ae97c7d51213f4b70b6a958292530289090de3a7e15841e108fbe144f069d313"}, +] + +[package.dependencies] +appdirs = ">=1.4.3,<1.5.0" +setuptools = "*" +six = ">=1.10,<2.0" + +[package.extras] +scandir = ["scandir (>=1.5,<2.0)"] + +[[package]] +name = "fs-s3fs" +version = "1.1.1" +description = "Amazon S3 filesystem for PyFilesystem2" +category = "main" +optional = true +python-versions = "*" +files = [ + {file = "fs-s3fs-1.1.1.tar.gz", hash = "sha256:b57f8c7664460ff7b451b4b44ca2ea9623a374d74e1284c2d5e6df499dc7976c"}, + {file = "fs_s3fs-1.1.1-py2.py3-none-any.whl", hash = "sha256:9ba160eaa93390cc5992a857675666cb2fbb3721b872474dfdc659a715c39280"}, +] + +[package.dependencies] +boto3 = ">=1.9,<2.0" +fs = ">=2.4,<3.0" +six = ">=1.10,<2.0" + +[[package]] +name = "greenlet" +version = "2.0.2" +description = "Lightweight in-process concurrent programming" +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +files = [ + {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, + {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, + {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, + {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, + {file = 
"greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, + {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, + {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, + {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, + {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, + {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, + {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, + {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, + {file = 
"greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, + {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, + {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, + {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, + {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, + {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, + {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, + {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = 
"sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, + {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, + {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, + {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, +] + +[package.extras] +docs = ["Sphinx", "docutils (<0.18)"] +test = ["objgraph", "psutil"] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "main" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "importlib-metadata" +version = "4.13.0" +description = "Read metadata from Python packages" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-4.13.0-py3-none-any.whl", hash = "sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116"}, + {file = "importlib_metadata-4.13.0.tar.gz", hash = "sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +perf = ["ipython"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] + +[[package]] +name = "importlib-resources" +version = "5.12.0" +description = "Read resources from Python packages" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, + {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", 
"jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[[package]] +name = "inflection" +version = "0.5.1" +description = "A port of Ruby on Rails inflector to Python" +category = "main" +optional = false +python-versions = ">=3.5" +files = [ + {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, + {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "joblib" +version = "1.3.1" +description = "Lightweight pipelining with Python functions" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "joblib-1.3.1-py3-none-any.whl", hash = "sha256:89cf0529520e01b3de7ac7b74a8102c90d16d54c64b5dd98cafcd14307fdf915"}, + {file = "joblib-1.3.1.tar.gz", hash = "sha256:1f937906df65329ba98013dc9692fe22a4c5e4a648112de500508b18a21b41e3"}, +] + +[[package]] +name = "jsonpath-ng" +version = "1.5.3" +description = "A final implementation of JSONPath for Python that aims to be standard compliant, including arithmetic and binary comparison operators and providing clear AST for metaprogramming." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "jsonpath-ng-1.5.3.tar.gz", hash = "sha256:a273b182a82c1256daab86a313b937059261b5c5f8c4fa3fc38b882b344dd567"}, + {file = "jsonpath_ng-1.5.3-py2-none-any.whl", hash = "sha256:f75b95dbecb8a0f3b86fd2ead21c2b022c3f5770957492b9b6196ecccfeb10aa"}, + {file = "jsonpath_ng-1.5.3-py3-none-any.whl", hash = "sha256:292a93569d74029ba75ac2dc3d3630fc0e17b2df26119a165fa1d498ca47bf65"}, +] + +[package.dependencies] +decorator = "*" +ply = "*" +six = "*" + +[[package]] +name = "jsonschema" +version = "4.17.3" +description = "An implementation of JSON Schema validation for Python" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, + {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, +] + +[package.dependencies] +attrs = ">=17.4.0" +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} +pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +typing-extensions = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "memoization" +version = "0.4.0" +description = "A powerful caching library for Python, with TTL support and multiple algorithm options. 
(https://github.com/lonelyenvoy/python-memoization)" +category = "main" +optional = false +python-versions = ">=3, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" +files = [ + {file = "memoization-0.4.0.tar.gz", hash = "sha256:fde5e7cd060ef45b135e0310cfec17b2029dc472ccb5bbbbb42a503d4538a135"}, +] + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[[package]] +name = "pendulum" +version = "2.1.2" +description = "Python datetimes made easy" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "pendulum-2.1.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:b6c352f4bd32dff1ea7066bd31ad0f71f8d8100b9ff709fb343f3b86cee43efe"}, + {file = "pendulum-2.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:318f72f62e8e23cd6660dbafe1e346950281a9aed144b5c596b2ddabc1d19739"}, + {file = "pendulum-2.1.2-cp35-cp35m-macosx_10_15_x86_64.whl", hash = "sha256:0731f0c661a3cb779d398803655494893c9f581f6488048b3fb629c2342b5394"}, + {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3481fad1dc3f6f6738bd575a951d3c15d4b4ce7c82dce37cf8ac1483fde6e8b0"}, + {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9702069c694306297ed362ce7e3c1ef8404ac8ede39f9b28b7c1a7ad8c3959e3"}, + {file = "pendulum-2.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:fb53ffa0085002ddd43b6ca61a7b34f2d4d7c3ed66f931fe599e1a531b42af9b"}, + {file = "pendulum-2.1.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:c501749fdd3d6f9e726086bf0cd4437281ed47e7bca132ddb522f86a1645d360"}, + {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c807a578a532eeb226150d5006f156632df2cc8c5693d778324b43ff8c515dd0"}, + {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2d1619a721df661e506eff8db8614016f0720ac171fe80dda1333ee44e684087"}, + {file = "pendulum-2.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f888f2d2909a414680a29ae74d0592758f2b9fcdee3549887779cd4055e975db"}, + {file = "pendulum-2.1.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e95d329384717c7bf627bf27e204bc3b15c8238fa8d9d9781d93712776c14002"}, + {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4c9c689747f39d0d02a9f94fcee737b34a5773803a64a5fdb046ee9cac7442c5"}, + {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1245cd0075a3c6d889f581f6325dd8404aca5884dea7223a5566c38aab94642b"}, + {file = "pendulum-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:db0a40d8bcd27b4fb46676e8eb3c732c67a5a5e6bfab8927028224fbced0b40b"}, + {file = "pendulum-2.1.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f5e236e7730cab1644e1b87aca3d2ff3e375a608542e90fe25685dae46310116"}, + {file = "pendulum-2.1.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:de42ea3e2943171a9e95141f2eecf972480636e8e484ccffaf1e833929e9e052"}, + {file = "pendulum-2.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7c5ec650cb4bec4c63a89a0242cc8c3cebcec92fcfe937c417ba18277d8560be"}, + {file = "pendulum-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:33fb61601083f3eb1d15edeb45274f73c63b3c44a8524703dc143f4212bf3269"}, + {file = "pendulum-2.1.2-cp39-cp39-manylinux1_i686.whl", hash = 
"sha256:29c40a6f2942376185728c9a0347d7c0f07905638c83007e1d262781f1e6953a"}, + {file = "pendulum-2.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:94b1fc947bfe38579b28e1cccb36f7e28a15e841f30384b5ad6c5e31055c85d7"}, + {file = "pendulum-2.1.2.tar.gz", hash = "sha256:b06a0ca1bfe41c990bbf0c029f0b6501a7f2ec4e38bfec730712015e8860f207"}, +] + +[package.dependencies] +python-dateutil = ">=2.6,<3.0" +pytzdata = ">=2020.1" + +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] + +[[package]] +name = "pluggy" +version = "1.2.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pluggy-1.2.0-py3-none-any.whl", hash = "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849"}, + {file = "pluggy-1.2.0.tar.gz", hash = "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "ply" +version = "3.11" +description = "Python Lex & Yacc" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, + {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pyjwt" +version = "2.7.0" +description = "JSON Web Token implementation in Python" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "PyJWT-2.7.0-py3-none-any.whl", hash = "sha256:ba2b425b15ad5ef12f200dc67dd56af4e26de2331f965c5439994dad075876e1"}, + {file = "PyJWT-2.7.0.tar.gz", hash = "sha256:bd6ca4a3c4285c1a2d4349e5a035fdf8fb94e04ccd0fcbe6ba289dae9cc3e074"}, +] + +[package.dependencies] +typing-extensions = {version = "*", markers = "python_version <= \"3.7\""} + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pyrsistent" +version = "0.19.3" +description = "Persistent/Functional/Immutable data structures" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, + {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, + {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, + {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, + {file = 
"pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, + {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, + {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, +] + +[[package]] +name = "pytest" +version = "7.4.0" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"}, + {file = "pytest-7.4.0.tar.gz", hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-durations" +version = "1.2.0" +description = "Pytest plugin reporting fixtures and test functions execution time." 
+category = "dev" +optional = false +python-versions = ">=3.6.2" +files = [ + {file = "pytest-durations-1.2.0.tar.gz", hash = "sha256:75793f7c2c393a947de4a92cc205e8dcb3d7fcde492628926cca97eb8e87077d"}, + {file = "pytest_durations-1.2.0-py3-none-any.whl", hash = "sha256:210c649d989fdf8e864b7f614966ca2c8be5b58a5224d60089a43618c146d7fb"}, +] + +[package.dependencies] +pytest = ">=4.6" + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "0.21.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "python-dotenv-0.21.1.tar.gz", hash = "sha256:1c93de8f636cde3ce377292818d0e440b6e45a82f215c3744979151fa8151c49"}, + {file = "python_dotenv-0.21.1-py3-none-any.whl", hash = "sha256:41e12e0318bebc859fcc4d97d4db8d20ad21721a6aa5047dd59f090391cb549a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pytz" +version = "2023.3" +description = "World timezone definitions, modern and historical" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, + {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, +] + +[[package]] +name = "pytzdata" +version = "2020.1" +description = "The Olson timezone database for Python." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pytzdata-2020.1-py2.py3-none-any.whl", hash = "sha256:e1e14750bcf95016381e4d472bad004eef710f2d6417240904070b3d6654485f"}, + {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"}, +] + +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, + {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, + {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = 
"sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "s3transfer" +version = "0.6.1" +description = "An Amazon S3 Transfer Manager" +category = "main" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "s3transfer-0.6.1-py3-none-any.whl", hash = "sha256:3c0da2d074bf35d6870ef157158641178a4204a6e689e82546083e31e0311346"}, + {file = "s3transfer-0.6.1.tar.gz", hash = "sha256:640bb492711f4c0c0905e1f62b6aaeb771881935ad27884852411f8e9cacbca9"}, +] + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] + +[[package]] +name = "setuptools" +version = "68.0.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, + {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "simplejson" +version = "3.19.1" +description = "Simple, fast, extensible JSON encoder/decoder for Python" +category = "main" +optional = false +python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "simplejson-3.19.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:412e58997a30c5deb8cab5858b8e2e5b40ca007079f7010ee74565cc13d19665"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e765b1f47293dedf77946f0427e03ee45def2862edacd8868c6cf9ab97c8afbd"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:3231100edee292da78948fa0a77dee4e5a94a0a60bcba9ed7a9dc77f4d4bb11e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:081ea6305b3b5e84ae7417e7f45956db5ea3872ec497a584ec86c3260cda049e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f253edf694ce836631b350d758d00a8c4011243d58318fbfbe0dd54a6a839ab4"}, + {file = 
"simplejson-3.19.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:5db86bb82034e055257c8e45228ca3dbce85e38d7bfa84fa7b2838e032a3219c"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:69a8b10a4f81548bc1e06ded0c4a6c9042c0be0d947c53c1ed89703f7e613950"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:58ee5e24d6863b22194020eb62673cf8cc69945fcad6b283919490f6e359f7c5"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:73d0904c2471f317386d4ae5c665b16b5c50ab4f3ee7fd3d3b7651e564ad74b1"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:66d780047c31ff316ee305c3f7550f352d87257c756413632303fc59fef19eac"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd4d50a27b065447c9c399f0bf0a993bd0e6308db8bbbfbc3ea03b41c145775a"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c16ec6a67a5f66ab004190829eeede01c633936375edcad7cbf06d3241e5865"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17a963e8dd4d81061cc05b627677c1f6a12e81345111fbdc5708c9f088d752c9"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7e78d79b10aa92f40f54178ada2b635c960d24fc6141856b926d82f67e56d169"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad071cd84a636195f35fa71de2186d717db775f94f985232775794d09f8d9061"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e7c70f19405e5f99168077b785fe15fcb5f9b3c0b70b0b5c2757ce294922c8c"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54fca2b26bcd1c403146fd9461d1da76199442297160721b1d63def2a1b17799"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:48600a6e0032bed17c20319d91775f1797d39953ccfd68c27f83c8d7fc3b32cb"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:93f5ac30607157a0b2579af59a065bcfaa7fadeb4875bf927a8f8b6739c8d910"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b79642a599740603ca86cf9df54f57a2013c47e1dd4dd2ae4769af0a6816900"}, + {file = "simplejson-3.19.1-cp310-cp310-win32.whl", hash = "sha256:d9f2c27f18a0b94107d57294aab3d06d6046ea843ed4a45cae8bd45756749f3a"}, + {file = "simplejson-3.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:5673d27806085d2a413b3be5f85fad6fca4b7ffd31cfe510bbe65eea52fff571"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:79c748aa61fd8098d0472e776743de20fae2686edb80a24f0f6593a77f74fe86"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:390f4a8ca61d90bcf806c3ad644e05fa5890f5b9a72abdd4ca8430cdc1e386fa"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d61482b5d18181e6bb4810b4a6a24c63a490c3a20e9fbd7876639653e2b30a1a"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2541fdb7467ef9bfad1f55b6c52e8ea52b3ce4a0027d37aff094190a955daa9d"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46133bc7dd45c9953e6ee4852e3de3d5a9a4a03b068bd238935a5c72f0a1ce34"}, + {file = 
"simplejson-3.19.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f96def94576f857abf58e031ce881b5a3fc25cbec64b2bc4824824a8a4367af9"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f14ecca970d825df0d29d5c6736ff27999ee7bdf5510e807f7ad8845f7760ce"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:66389b6b6ee46a94a493a933a26008a1bae0cfadeca176933e7ff6556c0ce998"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:22b867205cd258050c2625325fdd9a65f917a5aff22a23387e245ecae4098e78"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c39fa911e4302eb79c804b221ddec775c3da08833c0a9120041dd322789824de"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65dafe413b15e8895ad42e49210b74a955c9ae65564952b0243a18fb35b986cc"}, + {file = "simplejson-3.19.1-cp311-cp311-win32.whl", hash = "sha256:f05d05d99fce5537d8f7a0af6417a9afa9af3a6c4bb1ba7359c53b6257625fcb"}, + {file = "simplejson-3.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:b46aaf0332a8a9c965310058cf3487d705bf672641d2c43a835625b326689cf4"}, + {file = "simplejson-3.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b438e5eaa474365f4faaeeef1ec3e8d5b4e7030706e3e3d6b5bee6049732e0e6"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9d614a612ad02492f704fbac636f666fa89295a5d22b4facf2d665fc3b5ea9"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46e89f58e4bed107626edce1cf098da3664a336d01fc78fddcfb1f397f553d44"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96ade243fb6f3b57e7bd3b71e90c190cd0f93ec5dce6bf38734a73a2e5fa274f"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed18728b90758d171f0c66c475c24a443ede815cf3f1a91e907b0db0ebc6e508"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:6a561320485017ddfc21bd2ed5de2d70184f754f1c9b1947c55f8e2b0163a268"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:2098811cd241429c08b7fc5c9e41fcc3f59f27c2e8d1da2ccdcf6c8e340ab507"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:8f8d179393e6f0cf6c7c950576892ea6acbcea0a320838c61968ac7046f59228"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:eff87c68058374e45225089e4538c26329a13499bc0104b52b77f8428eed36b2"}, + {file = "simplejson-3.19.1-cp36-cp36m-win32.whl", hash = "sha256:d300773b93eed82f6da138fd1d081dc96fbe53d96000a85e41460fe07c8d8b33"}, + {file = "simplejson-3.19.1-cp36-cp36m-win_amd64.whl", hash = "sha256:37724c634f93e5caaca04458f267836eb9505d897ab3947b52f33b191bf344f3"}, + {file = "simplejson-3.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:74bf802debe68627227ddb665c067eb8c73aa68b2476369237adf55c1161b728"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70128fb92932524c89f373e17221cf9535d7d0c63794955cc3cd5868e19f5d38"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:8090e75653ea7db75bc21fa5f7bcf5f7bdf64ea258cbbac45c7065f6324f1b50"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a755f7bfc8adcb94887710dc70cc12a69a454120c6adcc6f251c3f7b46ee6aac"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ccb2c1877bc9b25bc4f4687169caa925ffda605d7569c40e8e95186e9a5e58b"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:919bc5aa4d8094cf8f1371ea9119e5d952f741dc4162810ab714aec948a23fe5"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e333c5b62e93949f5ac27e6758ba53ef6ee4f93e36cc977fe2e3df85c02f6dc4"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3a4480e348000d89cf501b5606415f4d328484bbb431146c2971123d49fd8430"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cb502cde018e93e75dc8fc7bb2d93477ce4f3ac10369f48866c61b5e031db1fd"}, + {file = "simplejson-3.19.1-cp37-cp37m-win32.whl", hash = "sha256:f41915a4e1f059dfad614b187bc06021fefb5fc5255bfe63abf8247d2f7a646a"}, + {file = "simplejson-3.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3844305bc33d52c4975da07f75b480e17af3558c0d13085eaa6cc2f32882ccf7"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1cb19eacb77adc5a9720244d8d0b5507421d117c7ed4f2f9461424a1829e0ceb"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:926957b278de22797bfc2f004b15297013843b595b3cd7ecd9e37ccb5fad0b72"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b0e9a5e66969f7a47dc500e3dba8edc3b45d4eb31efb855c8647700a3493dd8a"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79d46e7e33c3a4ef853a1307b2032cfb7220e1a079d0c65488fbd7118f44935a"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344a5093b71c1b370968d0fbd14d55c9413cb6f0355fdefeb4a322d602d21776"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23fbb7b46d44ed7cbcda689295862851105c7594ae5875dce2a70eeaa498ff86"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3025e7e9ddb48813aec2974e1a7e68e63eac911dd5e0a9568775de107ac79a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:87b190e6ceec286219bd6b6f13547ca433f977d4600b4e81739e9ac23b5b9ba9"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc935d8322ba9bc7b84f99f40f111809b0473df167bf5b93b89fb719d2c4892b"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3b652579c21af73879d99c8072c31476788c8c26b5565687fd9db154070d852a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6aa7ca03f25b23b01629b1c7f78e1cd826a66bfb8809f8977a3635be2ec48f1a"}, + {file = "simplejson-3.19.1-cp38-cp38-win32.whl", hash = "sha256:08be5a241fdf67a8e05ac7edbd49b07b638ebe4846b560673e196b2a25c94b92"}, + {file = "simplejson-3.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ca56a6c8c8236d6fe19abb67ef08d76f3c3f46712c49a3b6a5352b6e43e8855f"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:6424d8229ba62e5dbbc377908cfee9b2edf25abd63b855c21f12ac596cd18e41"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:547ea86ca408a6735335c881a2e6208851027f5bfd678d8f2c92a0f02c7e7330"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:889328873c35cb0b2b4c83cbb83ec52efee5a05e75002e2c0c46c4e42790e83c"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cdb4e544134f305b033ad79ae5c6b9a32e7c58b46d9f55a64e2a883fbbba01"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc2b3f06430cbd4fac0dae5b2974d2bf14f71b415fb6de017f498950da8159b1"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d125e754d26c0298715bdc3f8a03a0658ecbe72330be247f4b328d229d8cf67f"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:476c8033abed7b1fd8db62a7600bf18501ce701c1a71179e4ce04ac92c1c5c3c"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:199a0bcd792811c252d71e3eabb3d4a132b3e85e43ebd93bfd053d5b59a7e78b"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a79b439a6a77649bb8e2f2644e6c9cc0adb720fc55bed63546edea86e1d5c6c8"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:203412745fed916fc04566ecef3f2b6c872b52f1e7fb3a6a84451b800fb508c1"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ca922c61d87b4c38f37aa706520328ffe22d7ac1553ef1cadc73f053a673553"}, + {file = "simplejson-3.19.1-cp39-cp39-win32.whl", hash = "sha256:3e0902c278243d6f7223ba3e6c5738614c971fd9a887fff8feaa8dcf7249c8d4"}, + {file = "simplejson-3.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:d396b610e77b0c438846607cd56418bfc194973b9886550a98fd6724e8c6cfec"}, + {file = "simplejson-3.19.1-py3-none-any.whl", hash = "sha256:4710806eb75e87919b858af0cba4ffedc01b463edc3982ded7b55143f39e41e1"}, + {file = "simplejson-3.19.1.tar.gz", hash = "sha256:6277f60848a7d8319d27d2be767a7546bc965535b28070e310b3a9af90604a4c"}, +] + +[[package]] +name = "singer-sdk" +version = "0.26.0" +description = "A framework for building Singer taps" +category = "main" +optional = false +python-versions = ">=3.7.1,<3.12" +files = [ + {file = "singer_sdk-0.26.0-py3-none-any.whl", hash = "sha256:bd506a28734eb8c343dba3ac4ff01881f0bf65e75b9d35b0a3b264ee4540924d"}, + {file = "singer_sdk-0.26.0.tar.gz", hash = "sha256:b0b8d26a667a2c213128b750f1d9deddc9b8c286937cf6c38a3a5ee0f7eee815"}, +] + +[package.dependencies] +backoff = ">=2.0.0,<3.0" +click = ">=8.0,<9.0" +cryptography = ">=3.4.6,<41.0.0" +fs = ">=2.4.16,<3.0.0" +importlib-metadata = {version = "<5.0.0", markers = "python_version < \"3.8\""} +importlib-resources = {version = "5.12.0", markers = "python_version < \"3.9\""} +inflection = ">=0.5.1,<0.6.0" +joblib = ">=1.0.1,<2.0.0" +jsonpath-ng = ">=1.5.3,<2.0.0" +jsonschema = ">=4.16.0,<5.0.0" +memoization = ">=0.3.2,<0.5.0" +pendulum = ">=2.1.0,<3.0.0" +PyJWT = ">=2.4,<3.0" +pytest = {version = ">=7.2.1,<8.0.0", optional = true, markers = "extra == \"testing\""} +pytest-durations = {version = ">=1.2.0,<2.0.0", optional = true, markers = "extra == \"testing\""} +python-dotenv = ">=0.20,<0.22" +pytz = ">=2022.2.1,<2024.0.0" +PyYAML = ">=6.0,<7.0" +requests = ">=2.25.1,<3.0.0" +simplejson = ">=3.17.6,<4.0.0" +sqlalchemy = 
">=1.4,<2.0" +typing-extensions = ">=4.2.0,<5.0.0" + +[package.extras] +docs = ["furo (>=2022.12.7,<2024.0.0)", "myst-parser (>=0.17.2,<1.1.0)", "sphinx (>=4.5,<6.0)", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.3.1,<0.6.0)", "sphinx-reredirects (>=0.1.1,<0.2.0)"] +s3 = ["fs-s3fs (>=1.1.1,<2.0.0)"] +testing = ["pytest (>=7.2.1,<8.0.0)", "pytest-durations (>=1.2.0,<2.0.0)"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sqlalchemy" +version = "1.4.48" +description = "Database Abstraction Library" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "SQLAlchemy-1.4.48-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:4bac3aa3c3d8bc7408097e6fe8bf983caa6e9491c5d2e2488cfcfd8106f13b6a"}, + {file = "SQLAlchemy-1.4.48-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:dbcae0e528d755f4522cad5842f0942e54b578d79f21a692c44d91352ea6d64e"}, + {file = "SQLAlchemy-1.4.48-cp27-cp27m-win32.whl", hash = "sha256:cbbe8b8bffb199b225d2fe3804421b7b43a0d49983f81dc654d0431d2f855543"}, + {file = "SQLAlchemy-1.4.48-cp27-cp27m-win_amd64.whl", hash = "sha256:627e04a5d54bd50628fc8734d5fc6df2a1aa5962f219c44aad50b00a6cdcf965"}, + {file = "SQLAlchemy-1.4.48-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9af1db7a287ef86e0f5cd990b38da6bd9328de739d17e8864f1817710da2d217"}, + {file = "SQLAlchemy-1.4.48-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:ce7915eecc9c14a93b73f4e1c9d779ca43e955b43ddf1e21df154184f39748e5"}, + {file = "SQLAlchemy-1.4.48-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5381ddd09a99638f429f4cbe1b71b025bed318f6a7b23e11d65f3eed5e181c33"}, + {file = "SQLAlchemy-1.4.48-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:87609f6d4e81a941a17e61a4c19fee57f795e96f834c4f0a30cee725fc3f81d9"}, + {file = "SQLAlchemy-1.4.48-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb0808ad34167f394fea21bd4587fc62f3bd81bba232a1e7fbdfa17e6cfa7cd7"}, + {file = "SQLAlchemy-1.4.48-cp310-cp310-win32.whl", hash = "sha256:d53cd8bc582da5c1c8c86b6acc4ef42e20985c57d0ebc906445989df566c5603"}, + {file = "SQLAlchemy-1.4.48-cp310-cp310-win_amd64.whl", hash = "sha256:4355e5915844afdc5cf22ec29fba1010166e35dd94a21305f49020022167556b"}, + {file = "SQLAlchemy-1.4.48-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:066c2b0413e8cb980e6d46bf9d35ca83be81c20af688fedaef01450b06e4aa5e"}, + {file = "SQLAlchemy-1.4.48-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c99bf13e07140601d111a7c6f1fc1519914dd4e5228315bbda255e08412f61a4"}, + {file = "SQLAlchemy-1.4.48-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ee26276f12614d47cc07bc85490a70f559cba965fb178b1c45d46ffa8d73fda"}, + {file = "SQLAlchemy-1.4.48-cp311-cp311-win32.whl", hash = "sha256:49c312bcff4728bffc6fb5e5318b8020ed5c8b958a06800f91859fe9633ca20e"}, + {file = 
"SQLAlchemy-1.4.48-cp311-cp311-win_amd64.whl", hash = "sha256:cef2e2abc06eab187a533ec3e1067a71d7bbec69e582401afdf6d8cad4ba3515"}, + {file = "SQLAlchemy-1.4.48-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:3509159e050bd6d24189ec7af373359f07aed690db91909c131e5068176c5a5d"}, + {file = "SQLAlchemy-1.4.48-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fc2ab4d9f6d9218a5caa4121bdcf1125303482a1cdcfcdbd8567be8518969c0"}, + {file = "SQLAlchemy-1.4.48-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e1ddbbcef9bcedaa370c03771ebec7e39e3944782bef49e69430383c376a250b"}, + {file = "SQLAlchemy-1.4.48-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f82d8efea1ca92b24f51d3aea1a82897ed2409868a0af04247c8c1e4fef5890"}, + {file = "SQLAlchemy-1.4.48-cp36-cp36m-win32.whl", hash = "sha256:e3e98d4907805b07743b583a99ecc58bf8807ecb6985576d82d5e8ae103b5272"}, + {file = "SQLAlchemy-1.4.48-cp36-cp36m-win_amd64.whl", hash = "sha256:25887b4f716e085a1c5162f130b852f84e18d2633942c8ca40dfb8519367c14f"}, + {file = "SQLAlchemy-1.4.48-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:0817c181271b0ce5df1aa20949f0a9e2426830fed5ecdcc8db449618f12c2730"}, + {file = "SQLAlchemy-1.4.48-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe1dd2562313dd9fe1778ed56739ad5d9aae10f9f43d9f4cf81d65b0c85168bb"}, + {file = "SQLAlchemy-1.4.48-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:68413aead943883b341b2b77acd7a7fe2377c34d82e64d1840860247cec7ff7c"}, + {file = "SQLAlchemy-1.4.48-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbde5642104ac6e95f96e8ad6d18d9382aa20672008cf26068fe36f3004491df"}, + {file = "SQLAlchemy-1.4.48-cp37-cp37m-win32.whl", hash = "sha256:11c6b1de720f816c22d6ad3bbfa2f026f89c7b78a5c4ffafb220e0183956a92a"}, + {file = "SQLAlchemy-1.4.48-cp37-cp37m-win_amd64.whl", hash = "sha256:eb5464ee8d4bb6549d368b578e9529d3c43265007193597ddca71c1bae6174e6"}, + {file = "SQLAlchemy-1.4.48-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:92e6133cf337c42bfee03ca08c62ba0f2d9695618c8abc14a564f47503157be9"}, + {file = "SQLAlchemy-1.4.48-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d29a3fc6d9c45962476b470a81983dd8add6ad26fdbfae6d463b509d5adcda"}, + {file = "SQLAlchemy-1.4.48-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:005e942b451cad5285015481ae4e557ff4154dde327840ba91b9ac379be3b6ce"}, + {file = "SQLAlchemy-1.4.48-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c8cfe951ed074ba5e708ed29c45397a95c4143255b0d022c7c8331a75ae61f3"}, + {file = "SQLAlchemy-1.4.48-cp38-cp38-win32.whl", hash = "sha256:2b9af65cc58726129d8414fc1a1a650dcdd594ba12e9c97909f1f57d48e393d3"}, + {file = "SQLAlchemy-1.4.48-cp38-cp38-win_amd64.whl", hash = "sha256:2b562e9d1e59be7833edf28b0968f156683d57cabd2137d8121806f38a9d58f4"}, + {file = "SQLAlchemy-1.4.48-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:a1fc046756cf2a37d7277c93278566ddf8be135c6a58397b4c940abf837011f4"}, + {file = "SQLAlchemy-1.4.48-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d9b55252d2ca42a09bcd10a697fa041e696def9dfab0b78c0aaea1485551a08"}, + {file = 
"SQLAlchemy-1.4.48-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6dab89874e72a9ab5462997846d4c760cdb957958be27b03b49cf0de5e5c327c"}, + {file = "SQLAlchemy-1.4.48-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd8b5ee5a3acc4371f820934b36f8109ce604ee73cc668c724abb054cebcb6e"}, + {file = "SQLAlchemy-1.4.48-cp39-cp39-win32.whl", hash = "sha256:eee09350fd538e29cfe3a496ec6f148504d2da40dbf52adefb0d2f8e4d38ccc4"}, + {file = "SQLAlchemy-1.4.48-cp39-cp39-win_amd64.whl", hash = "sha256:7ad2b0f6520ed5038e795cc2852eb5c1f20fa6831d73301ced4aafbe3a10e1f6"}, + {file = "SQLAlchemy-1.4.48.tar.gz", hash = "sha256:b47bc287096d989a0838ce96f7d8e966914a24da877ed41a7531d44b55cdb8df"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and platform_machine == \"aarch64\" or python_version >= \"3\" and platform_machine == \"ppc64le\" or python_version >= \"3\" and platform_machine == \"x86_64\" or python_version >= \"3\" and platform_machine == \"amd64\" or python_version >= \"3\" and platform_machine == \"AMD64\" or python_version >= \"3\" and platform_machine == \"win32\" or python_version >= \"3\" and platform_machine == \"WIN32\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] +mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +pymysql = ["pymysql", "pymysql (<1)"] +sqlcipher = ["sqlcipher3-binary"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "typing-extensions" +version = "4.7.1" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] + +[[package]] +name = "urllib3" +version = "1.26.16" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"}, + {file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "zipp" +version = "3.15.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[extras] +s3 = ["fs-s3fs"] + +[metadata] +lock-version = "2.0" +python-versions = "<3.12,>=3.7.1" +content-hash = "6ab84b9a0ba4615bacb39e9fb93e992363af8c0956864bde3c54a1696cd1118b" From 5738c5c7632db86d027872f6f686f8712a34d836 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 6 Jul 2023 12:34:33 -0400 Subject: [PATCH 101/105] linting --- tap_hubspot/streams.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 543b64f..cace03c 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -454,7 +454,7 @@ def get_url_params( return params - records_jsonpath = "$[subscriptionDefinitions][*]" # Or override `parse_response`. + records_jsonpath = "$[subscriptionDefinitions][*]" # Or override `parse_response`. class PropertyTicketStream(HubspotStream): @@ -556,7 +556,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: row["hubspot_object"] = "ticket" return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -640,7 +640,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -723,7 +723,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -806,7 +806,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -889,7 +889,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. 
@@ -972,7 +972,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -1055,7 +1055,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -1138,7 +1138,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -1221,7 +1221,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -1304,7 +1304,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -1387,7 +1387,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -1470,7 +1470,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. @@ -1553,7 +1553,7 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: pass return super().post_process(row, context) - + records_jsonpath = "$[results][*]" # Or override `parse_response`. def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: @@ -2691,4 +2691,3 @@ def get_url_params( return params records_jsonpath = "$[results][*]" # Or override `parse_response`. - From 1c7c7ff63aa539832e38ce106fcf4dac74d78851 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 6 Jul 2023 13:09:55 -0400 Subject: [PATCH 102/105] move records_jsonpath to top of class definition, add description --- tap_hubspot/streams.py | 140 ++++++++++++++++++++--------------------- 1 file changed, 70 insertions(+), 70 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index cace03c..19f373d 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -30,6 +30,7 @@ class ContactStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "contacts" @@ -37,6 +38,7 @@ class ContactStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -89,8 +91,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. 
- class UsersStream(HubspotStream): @@ -103,7 +103,8 @@ class UsersStream(HubspotStream): path: path which will be added to api url in client.py schema: instream schema primary_keys = primary keys for the table - replication_key = datetime keys for replication + replication_key = id keys for replication + records_jsonpath = json response body """ name = "users" @@ -111,6 +112,7 @@ class UsersStream(HubspotStream): primary_keys = ["id"] replication_key = "id" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -150,8 +152,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class OwnersStream(HubspotStream): @@ -165,6 +165,7 @@ class OwnersStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "owners" @@ -172,6 +173,7 @@ class OwnersStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -215,8 +217,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class TicketPipelineStream(HubspotStream): @@ -230,6 +230,7 @@ class TicketPipelineStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "ticket_pipelines" @@ -237,6 +238,7 @@ class TicketPipelineStream(HubspotStream): primary_keys = ["createdAt"] replication_key = "createdAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("label", StringType), @@ -301,8 +303,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class DealPipelineStream(HubspotStream): @@ -316,6 +316,7 @@ class DealPipelineStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "deal_pipelines" @@ -323,6 +324,7 @@ class DealPipelineStream(HubspotStream): primary_keys = ["createdAt"] replication_key = "createdAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("label", StringType), @@ -387,8 +389,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class EmailSubscriptionStream(HubspotStream): @@ -401,7 +401,8 @@ class EmailSubscriptionStream(HubspotStream): path: path which will be added to api url in client.py schema: instream schema primary_keys = primary keys for the table - replication_key = datetime keys for replication + replication_key = id keys for replication + records_jsonpath = json response body """ name = "email_subscriptions" @@ -409,6 +410,7 @@ class EmailSubscriptionStream(HubspotStream): primary_keys = ["id"] replication_key = "id" replication_method = "incremental" + records_jsonpath = "$[subscriptionDefinitions][*]" # Or override `parse_response`. 
schema = PropertiesList( Property("id", IntegerType), @@ -454,8 +456,6 @@ def get_url_params( return params - records_jsonpath = "$[subscriptionDefinitions][*]" # Or override `parse_response`. - class PropertyTicketStream(HubspotStream): @@ -469,6 +469,7 @@ class PropertyTicketStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_tickets" @@ -476,6 +477,7 @@ class PropertyTicketStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -557,8 +559,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyDealStream(HubspotStream): @@ -572,6 +572,7 @@ class PropertyDealStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_deals" @@ -579,6 +580,7 @@ class PropertyDealStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -641,8 +643,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyContactStream(HubspotStream): @@ -656,6 +656,7 @@ class PropertyContactStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_contacts" @@ -663,6 +664,7 @@ class PropertyContactStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -724,8 +726,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyCompanyStream(HubspotStream): @@ -739,6 +739,7 @@ class PropertyCompanyStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_companies" @@ -746,6 +747,7 @@ class PropertyCompanyStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -807,8 +809,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. 
- class PropertyProductStream(HubspotStream): @@ -822,6 +822,7 @@ class PropertyProductStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_products" @@ -829,6 +830,7 @@ class PropertyProductStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -890,8 +892,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyLineItemStream(HubspotStream): @@ -905,6 +905,7 @@ class PropertyLineItemStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_line_items" @@ -912,6 +913,7 @@ class PropertyLineItemStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -973,8 +975,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyEmailStream(HubspotStream): @@ -988,6 +988,7 @@ class PropertyEmailStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_emails" @@ -995,6 +996,7 @@ class PropertyEmailStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -1056,8 +1058,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyPostalMailStream(HubspotStream): @@ -1071,6 +1071,7 @@ class PropertyPostalMailStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_postal_mails" @@ -1078,6 +1079,7 @@ class PropertyPostalMailStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -1139,8 +1141,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. 
- class PropertyCallStream(HubspotStream): @@ -1154,6 +1154,7 @@ class PropertyCallStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_calls" @@ -1161,6 +1162,7 @@ class PropertyCallStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -1222,8 +1224,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyMeetingStream(HubspotStream): @@ -1237,6 +1237,7 @@ class PropertyMeetingStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_meetings" @@ -1244,6 +1245,7 @@ class PropertyMeetingStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -1305,8 +1307,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyTaskStream(HubspotStream): @@ -1320,6 +1320,7 @@ class PropertyTaskStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_tasks" @@ -1327,6 +1328,7 @@ class PropertyTaskStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -1388,8 +1390,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PropertyCommunicationStream(HubspotStream): @@ -1403,6 +1403,7 @@ class PropertyCommunicationStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "property_communications" @@ -1410,6 +1411,7 @@ class PropertyCommunicationStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -1471,8 +1473,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. 
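The hunk headers above reference the `post_process` hooks that patch 103 later deletes: each property stream tags its rows with the name of the source object so the merged property table built by `PropertyNotesStream.get_records` stays filterable per object type. Reconstructed as a self-contained sketch under that reading (the class name and `path` are stand-ins, not the tap's real definitions):

```python
from singer_sdk.streams import RESTStream


class PropertyTicketLike(RESTStream):
    """Illustrative stand-in for one of the tap's property streams."""

    name = "property_tickets"
    path = "/properties/tickets"
    url_base = "https://api.hubapi.com/crm/v3"

    def post_process(self, row: dict, context: dict | None = None) -> dict | None:
        # Tag each row with its source object ("ticket", "deal", "contact", ...)
        # before the property streams are merged into a single table.
        row["hubspot_object"] = "ticket"
        return super().post_process(row, context)
```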
- class PropertyNotesStream(HubspotStream): @@ -1486,6 +1486,7 @@ class PropertyNotesStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "properties" @@ -1493,6 +1494,7 @@ class PropertyNotesStream(HubspotStream): primary_keys = ["label"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("updatedAt", StringType), @@ -1554,8 +1556,6 @@ def post_process(self, row: dict, context: dict | None = None) -> dict | None: return super().post_process(row, context) - records_jsonpath = "$[results][*]" # Or override `parse_response`. - def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: """ Merges all the property stream data into a single property table @@ -1608,6 +1608,7 @@ class CompanyStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "companies" @@ -1615,6 +1616,7 @@ class CompanyStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -1667,8 +1669,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class DealStream(HubspotStream): """ @@ -1681,6 +1681,7 @@ class DealStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "deals" @@ -1688,6 +1689,7 @@ class DealStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -1740,8 +1742,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class FeedbackSubmissionsStream(HubspotStream): """ @@ -1754,6 +1754,7 @@ class FeedbackSubmissionsStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "feedback_submissions" @@ -1761,6 +1762,7 @@ class FeedbackSubmissionsStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -1814,8 +1816,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class LineItemStream(HubspotStream): """ @@ -1828,6 +1828,7 @@ class LineItemStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "line_items" @@ -1835,6 +1836,7 @@ class LineItemStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. 
schema = PropertiesList( Property("id", StringType), @@ -1887,8 +1889,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class ProductStream(HubspotStream): """ @@ -1901,6 +1901,7 @@ class ProductStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "products" @@ -1908,6 +1909,7 @@ class ProductStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -1960,8 +1962,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class TicketStream(HubspotStream): """ @@ -1974,6 +1974,7 @@ class TicketStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "tickets" @@ -1981,6 +1982,7 @@ class TicketStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2032,8 +2034,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class QuoteStream(HubspotStream): """ @@ -2046,6 +2046,7 @@ class QuoteStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "quotes" @@ -2053,6 +2054,7 @@ class QuoteStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2105,8 +2107,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class GoalStream(HubspotStream): """ @@ -2119,6 +2119,7 @@ class GoalStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "goals" @@ -2126,6 +2127,7 @@ class GoalStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2177,8 +2179,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class CallStream(HubspotStream): """ @@ -2191,6 +2191,7 @@ class CallStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "calls" @@ -2198,6 +2199,7 @@ class CallStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2253,8 +2255,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. 
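On the recurring `# Or override `parse_response`.` comment: the attribute and the override are interchangeable ways to locate records in the payload. A hedged sketch of the override route, for a stream whose envelope did not suit a JSONPath declaration (the class and endpoint here are invented for illustration):

```python
from typing import Iterable

import requests
from singer_sdk.streams import RESTStream


class ExampleStream(RESTStream):
    """Invented stream; only parse_response matters for this sketch."""

    name = "example"
    path = "/objects/example"
    url_base = "https://api.hubapi.com/crm/v3"

    def parse_response(self, response: requests.Response) -> Iterable[dict]:
        # Equivalent to declaring records_jsonpath = "$[results][*]".
        yield from response.json().get("results", [])
```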
- class CommunicationStream(HubspotStream): """ @@ -2267,6 +2267,7 @@ class CommunicationStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "communications" @@ -2274,6 +2275,7 @@ class CommunicationStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2323,8 +2325,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class EmailStream(HubspotStream): """ @@ -2337,6 +2337,7 @@ class EmailStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "emails" @@ -2344,6 +2345,7 @@ class EmailStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2402,8 +2404,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class MeetingStream(HubspotStream): """ @@ -2416,6 +2416,7 @@ class MeetingStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "meetings" @@ -2423,6 +2424,7 @@ class MeetingStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2479,8 +2481,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class NoteStream(HubspotStream): """ @@ -2493,6 +2493,7 @@ class NoteStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "notes" @@ -2500,6 +2501,7 @@ class NoteStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2549,8 +2551,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. - class PostalMailStream(HubspotStream): """ @@ -2563,6 +2563,7 @@ class PostalMailStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "postal_mail" @@ -2570,6 +2571,7 @@ class PostalMailStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2617,8 +2619,6 @@ def get_url_params( return params - records_jsonpath = "$[results][*]" # Or override `parse_response`. 
- class TaskStream(HubspotStream): """ @@ -2631,6 +2631,7 @@ class TaskStream(HubspotStream): schema: instream schema primary_keys = primary keys for the table replication_key = datetime keys for replication + records_jsonpath = json response body """ name = "tasks" @@ -2638,6 +2639,7 @@ class TaskStream(HubspotStream): primary_keys = ["id"] replication_key = "updatedAt" replication_method = "incremental" + records_jsonpath = "$[results][*]" # Or override `parse_response`. schema = PropertiesList( Property("id", StringType), @@ -2689,5 +2691,3 @@ def get_url_params( params["order_by"] = self.replication_key return params - - records_jsonpath = "$[results][*]" # Or override `parse_response`. From 3d7d2cd7d92a8f673a9fd848970a9f711215f9d0 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 6 Jul 2023 13:37:10 -0400 Subject: [PATCH 103/105] remove post_process --- tap_hubspot/streams.py | 153 ----------------------------------------- 1 file changed, 153 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 19f373d..f92a0c6 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -550,15 +550,6 @@ def get_url_params( return params - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - row["hubspot_object"] = "ticket" - - return super().post_process(row, context) - class PropertyDealStream(HubspotStream): @@ -631,18 +622,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "deal" - except: - pass - - return super().post_process(row, context) - class PropertyContactStream(HubspotStream): @@ -714,18 +693,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "contact" - except: - pass - - return super().post_process(row, context) - class PropertyCompanyStream(HubspotStream): @@ -797,18 +764,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "company" - except: - pass - - return super().post_process(row, context) - class PropertyProductStream(HubspotStream): @@ -880,18 +835,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "product" - except: - pass - - return super().post_process(row, context) - class PropertyLineItemStream(HubspotStream): @@ -963,18 +906,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "line_item" - except: - pass - - return super().post_process(row, context) - class PropertyEmailStream(HubspotStream): @@ -1046,18 +977,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, 
row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "email" - except: - pass - - return super().post_process(row, context) - class PropertyPostalMailStream(HubspotStream): @@ -1129,18 +1048,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "postal_mail" - except: - pass - - return super().post_process(row, context) - class PropertyCallStream(HubspotStream): @@ -1212,18 +1119,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "call" - except: - pass - - return super().post_process(row, context) - class PropertyMeetingStream(HubspotStream): @@ -1295,18 +1190,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "meeting" - except: - pass - - return super().post_process(row, context) - class PropertyTaskStream(HubspotStream): @@ -1378,18 +1261,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "task" - except: - pass - - return super().post_process(row, context) - class PropertyCommunicationStream(HubspotStream): @@ -1461,18 +1332,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "communication" - except: - pass - - return super().post_process(row, context) - class PropertyNotesStream(HubspotStream): @@ -1544,18 +1403,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def post_process(self, row: dict, context: dict | None = None) -> dict | None: - """ - Returns api records with added columns - """ - - try: - row["hubspot_object"] = "note" - except: - pass - - return super().post_process(row, context) - def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: """ Merges all the property stream data into a single property table From a424a481959e9e6ab8910a2e4f31a419193bb14b Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 6 Jul 2023 13:50:46 -0400 Subject: [PATCH 104/105] removed get_url_params from streams, update in client.py --- tap_hubspot/client.py | 3 +- tap_hubspot/streams.py | 506 ----------------------------------------- 2 files changed, 2 insertions(+), 507 deletions(-) diff --git a/tap_hubspot/client.py b/tap_hubspot/client.py index d938c4b..838c6c8 100644 --- a/tap_hubspot/client.py +++ b/tap_hubspot/client.py @@ -115,8 +115,9 @@ def get_url_params( """ params: dict = {} if next_page_token: - params["page"] = next_page_token + params["after"] = next_page_token if self.replication_key: params["sort"] = "asc" params["order_by"] = self.replication_key + return params diff --git a/tap_hubspot/streams.py 
b/tap_hubspot/streams.py index f92a0c6..0799cd2 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -68,29 +68,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class UsersStream(HubspotStream): @@ -129,29 +106,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/settings/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class OwnersStream(HubspotStream): @@ -194,29 +148,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class TicketPipelineStream(HubspotStream): @@ -280,29 +211,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm-pipelines/v1" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class DealPipelineStream(HubspotStream): @@ -366,29 +274,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm-pipelines/v1" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. 
- """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class EmailSubscriptionStream(HubspotStream): @@ -433,29 +318,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/email/public/v1" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class PropertyTicketStream(HubspotStream): @@ -527,29 +389,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class PropertyDealStream(HubspotStream): @@ -1493,29 +1332,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class DealStream(HubspotStream): """ @@ -1566,29 +1382,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class FeedbackSubmissionsStream(HubspotStream): """ @@ -1640,29 +1433,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. 
- """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class LineItemStream(HubspotStream): """ @@ -1713,29 +1483,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class ProductStream(HubspotStream): """ @@ -1786,29 +1533,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class TicketStream(HubspotStream): """ @@ -1858,29 +1582,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class QuoteStream(HubspotStream): """ @@ -1931,29 +1632,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class GoalStream(HubspotStream): """ @@ -2003,29 +1681,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. 
- """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class CallStream(HubspotStream): """ @@ -2079,29 +1734,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class CommunicationStream(HubspotStream): """ @@ -2149,29 +1781,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class EmailStream(HubspotStream): """ @@ -2228,29 +1837,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class MeetingStream(HubspotStream): """ @@ -2305,29 +1891,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class NoteStream(HubspotStream): """ @@ -2375,29 +1938,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. 
- """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class PostalMailStream(HubspotStream): """ @@ -2443,29 +1983,6 @@ def url_base(self) -> str: base_url = "https://api.hubapi.com/crm/v3" return base_url - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params - class TaskStream(HubspotStream): """ @@ -2515,26 +2032,3 @@ def url_base(self) -> str: """ base_url = "https://api.hubapi.com/crm/v3" return base_url - - def get_url_params( - self, - context: dict | None, - next_page_token: Any | None, - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. - - Args: - context: The stream context. - next_page_token: The next page index or value. - - Returns: - A dictionary of URL query parameters. - """ - params: dict = {} - if next_page_token: - params["after"] = next_page_token - if self.replication_key: - params["sort"] = "asc" - params["order_by"] = self.replication_key - - return params From 78297fe36edc67bd264d93ce293339b95ea40339 Mon Sep 17 00:00:00 2001 From: NeilGorman104 Date: Thu, 6 Jul 2023 13:57:32 -0400 Subject: [PATCH 105/105] simplify url_base function --- tap_hubspot/streams.py | 103 ++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 68 deletions(-) diff --git a/tap_hubspot/streams.py b/tap_hubspot/streams.py index 0799cd2..5374529 100644 --- a/tap_hubspot/streams.py +++ b/tap_hubspot/streams.py @@ -65,8 +65,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class UsersStream(HubspotStream): @@ -103,8 +102,8 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/settings/v3" - return base_url + + return "https://api.hubapi.com/settings/v3" class OwnersStream(HubspotStream): @@ -145,8 +144,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class TicketPipelineStream(HubspotStream): @@ -208,8 +206,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm-pipelines/v1" - return base_url + return "https://api.hubapi.com/crm-pipelines/v1" class DealPipelineStream(HubspotStream): @@ -271,8 +268,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm-pipelines/v1" - return base_url + return "https://api.hubapi.com/crm-pipelines/v1" class EmailSubscriptionStream(HubspotStream): @@ -315,8 +311,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/email/public/v1" - return base_url + return "https://api.hubapi.com/email/public/v1" 
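Patch 105's change is purely cosmetic: the `base_url` temporary was never used beyond the return, so a direct `return` is behavior-identical. Since most streams return the same constant, a further consolidation (hypothetical, not part of this series) would be to declare it once on the shared parent and override it only where the API family differs, roughly:

```python
from singer_sdk.streams import RESTStream


class HubspotBase(RESTStream):
    # Schematic stand-in for the tap's HubspotStream in client.py.
    url_base = "https://api.hubapi.com/crm/v3"


class UsersLike(HubspotBase):
    # The users stream sits on the settings API rather than the CRM API.
    url_base = "https://api.hubapi.com/settings/v3"
```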
class PropertyTicketStream(HubspotStream): @@ -386,8 +381,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyDealStream(HubspotStream): @@ -458,8 +452,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyContactStream(HubspotStream): @@ -529,8 +522,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyCompanyStream(HubspotStream): @@ -600,8 +592,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyProductStream(HubspotStream): @@ -671,8 +662,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyLineItemStream(HubspotStream): @@ -742,8 +732,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyEmailStream(HubspotStream): @@ -813,8 +802,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyPostalMailStream(HubspotStream): @@ -884,8 +872,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyCallStream(HubspotStream): @@ -955,8 +942,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyMeetingStream(HubspotStream): @@ -1026,8 +1012,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyTaskStream(HubspotStream): @@ -1097,8 +1082,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyCommunicationStream(HubspotStream): @@ -1168,8 +1152,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PropertyNotesStream(HubspotStream): @@ -1239,8 +1222,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: """ @@ -1329,8 +1311,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - 
base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class DealStream(HubspotStream): @@ -1379,8 +1360,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class FeedbackSubmissionsStream(HubspotStream): @@ -1430,8 +1410,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class LineItemStream(HubspotStream): @@ -1480,8 +1459,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class ProductStream(HubspotStream): @@ -1530,8 +1508,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class TicketStream(HubspotStream): @@ -1579,8 +1556,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class QuoteStream(HubspotStream): @@ -1629,8 +1605,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class GoalStream(HubspotStream): @@ -1678,8 +1653,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class CallStream(HubspotStream): @@ -1731,8 +1705,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class CommunicationStream(HubspotStream): @@ -1778,8 +1751,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class EmailStream(HubspotStream): @@ -1834,8 +1806,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class MeetingStream(HubspotStream): @@ -1888,8 +1859,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class NoteStream(HubspotStream): @@ -1935,8 +1905,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class PostalMailStream(HubspotStream): @@ -1980,8 +1949,7 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = "https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3" class TaskStream(HubspotStream): @@ -2030,5 +1998,4 @@ def url_base(self) -> str: """ Returns an updated path which includes the api version """ - base_url = 
"https://api.hubapi.com/crm/v3" - return base_url + return "https://api.hubapi.com/crm/v3"