From f355a07cf285ef8278c069149a7dd71e9bb936e6 Mon Sep 17 00:00:00 2001 From: aeluce Date: Thu, 30 Jan 2025 15:34:49 -0600 Subject: [PATCH] misc typo fixes --- .../tutorials/postgresql_cdc_to_snowflake.md | 10 +++++----- .../dekaf_reading_collections_from_kafka.md | 2 +- site/docs/reference/Configuring-task-shards.md | 2 +- .../MySQL/singlestore-batch.md | 10 +++++----- .../capture-connectors/PostgreSQL/Supabase.md | 2 +- .../PostgreSQL/google-cloud-sql-postgres.md | 2 +- .../PostgreSQL/neon-postgres.md | 2 +- .../Connectors/capture-connectors/alpaca.md | 2 +- .../Connectors/capture-connectors/amazon-s3.md | 2 +- .../capture-connectors/apache-kafka.md | 2 +- .../capture-connectors/facebook-marketing.md | 2 +- .../Connectors/capture-connectors/gitlab.md | 8 ++++---- .../capture-connectors/google-search-console.md | 6 +++--- .../capture-connectors/google-sheets.md | 4 ++-- .../Connectors/capture-connectors/harvest.md | 3 ++- .../Connectors/capture-connectors/marketo.md | 2 +- .../Connectors/capture-connectors/snapchat.md | 2 -- .../Connectors/capture-connectors/tiktok.md | 2 +- .../Connectors/dekaf/dekaf-clickhouse.md | 2 +- .../MySQL/amazon-rds-mysql.md | 2 +- .../MySQL/google-cloud-sql-mysql.md | 8 +++++--- .../materialization-connectors/MySQL/mysql.md | 4 ++-- .../PostgreSQL/google-cloud-sql-postgres.md | 2 +- .../SQLServer/amazon-rds-sqlserver.md | 2 +- .../materialization-connectors/alloydb.md | 4 ++-- .../materialization-connectors/slack.md | 14 +++++++------- .../materialization-connectors/starburst.md | 16 ++++++++-------- .../materialization-connectors/timescaledb.md | 2 +- site/docs/reference/editing.md | 4 ++-- .../reference/materialization-sync-schedule.md | 10 +++++----- site/docs/reference/notifications.md | 4 ++-- 31 files changed, 70 insertions(+), 69 deletions(-) diff --git a/site/docs/getting-started/tutorials/postgresql_cdc_to_snowflake.md b/site/docs/getting-started/tutorials/postgresql_cdc_to_snowflake.md index b27a3e7c7f..e99ba4d331 100644 --- a/site/docs/getting-started/tutorials/postgresql_cdc_to_snowflake.md +++ b/site/docs/getting-started/tutorials/postgresql_cdc_to_snowflake.md @@ -328,9 +328,9 @@ Preparing Snowflake for use with Estuary Flow involves the following steps: 1\. Keep the Flow web app open and open a new tab or window to access your Snowflake console. -3\. Create a new SQL worksheet. This provides a platform to execute queries. +2\. Create a new SQL worksheet. This provides a platform to execute queries. -4\. Paste the provided script into the SQL console, adjusting the value for `estuary_password` to a strong password. +3\. Paste the provided script into the SQL console, adjusting the value for `estuary_password` to a strong password. ```sql set database_name = 'ESTUARY_DB'; @@ -373,11 +373,11 @@ use role sysadmin; COMMIT; ``` -5\. Execute all the queries by clicking the drop-down arrow next to the Run button and selecting "Run All." +4\. Execute all the queries by clicking the drop-down arrow next to the Run button and selecting "Run All." -6\. Snowflake will process the queries, setting up the necessary roles, databases, schemas, users, and warehouses for Estuary Flow. +5\. Snowflake will process the queries, setting up the necessary roles, databases, schemas, users, and warehouses for Estuary Flow. -7\. Once the setup is complete, return to the Flow web application to continue with the integration process. +6\. Once the setup is complete, return to the Flow web application to continue with the integration process. 
Back in Flow, head over to the **Destinations** page, where you can [create a new Materialization](https://dashboard.estuary.dev/materializations/create). diff --git a/site/docs/guides/dekaf_reading_collections_from_kafka.md b/site/docs/guides/dekaf_reading_collections_from_kafka.md index 9e9a1c86a2..412d530630 100644 --- a/site/docs/guides/dekaf_reading_collections_from_kafka.md +++ b/site/docs/guides/dekaf_reading_collections_from_kafka.md @@ -101,7 +101,7 @@ kcat -C \ -X sasl.username="{}" \ -X sasl.password="Your_Estuary_Refresh_Token" \ -b dekaf.estuary-data.com:9092 \ - -t "full/nameof/estuarycolletion" \ + -t "full/nameof/estuarycollection" \ -p 0 \ -o beginning \ -s avro \ diff --git a/site/docs/reference/Configuring-task-shards.md b/site/docs/reference/Configuring-task-shards.md index c1458f5946..abd6dc7e0d 100644 --- a/site/docs/reference/Configuring-task-shards.md +++ b/site/docs/reference/Configuring-task-shards.md @@ -3,7 +3,7 @@ sidebar_position: 2 --- # Configuring task shards -For some catalog tasks, it's helpful to control the behavior of [shards](../concepts/advanced/shards.md) +For some catalog tasks, it's helpful to control the behavior of [shards](../concepts/advanced/shards.md). You do this by adding the `shards` configuration to the capture or materialization configuration. ## Properties diff --git a/site/docs/reference/Connectors/capture-connectors/MySQL/singlestore-batch.md b/site/docs/reference/Connectors/capture-connectors/MySQL/singlestore-batch.md index 96e9531c32..c7f7001d4e 100644 --- a/site/docs/reference/Connectors/capture-connectors/MySQL/singlestore-batch.md +++ b/site/docs/reference/Connectors/capture-connectors/MySQL/singlestore-batch.md @@ -19,9 +19,9 @@ If the dataset has a natural cursor that can identify only new or updated rows, 1. Ensure that [Estuary's IP addresses are allowlisted](/reference/allow-ip-addresses) to allow access. You can do by following [these steps](https://docs.singlestore.com/cloud/reference/management-api/#control-access-to-the-api) 2. Grab the following details from the SingleStore workspace. - 3. Workspace URL - 4. Username - 5. Password - 6. Database -7. Configure the Connector with the appropriate values. Make sure to specify the database name under the "Advanced" + 1. Workspace URL + 2. Username + 3. Password + 4. Database +3. Configure the Connector with the appropriate values. Make sure to specify the database name under the "Advanced" section. diff --git a/site/docs/reference/Connectors/capture-connectors/PostgreSQL/Supabase.md b/site/docs/reference/Connectors/capture-connectors/PostgreSQL/Supabase.md index 81672a202f..f799ae7957 100644 --- a/site/docs/reference/Connectors/capture-connectors/PostgreSQL/Supabase.md +++ b/site/docs/reference/Connectors/capture-connectors/PostgreSQL/Supabase.md @@ -183,7 +183,7 @@ store them separately. TOASTed values can sometimes present a challenge for systems that rely on the PostgreSQL write-ahead log (WAL), like this connector. If a change event occurs on a row that contains a TOASTed value, _but the TOASTed value itself is unchanged_, it is omitted from the WAL. -As a result, the connector emits a row update with the a value omitted, which might cause +As a result, the connector emits a row update with the value omitted, which might cause unexpected results in downstream catalog tasks if adjustments are not made. 
The PostgreSQL connector handles TOASTed values for you when you follow the [standard discovery workflow](/concepts/connectors.md#flowctl-discover) diff --git a/site/docs/reference/Connectors/capture-connectors/PostgreSQL/google-cloud-sql-postgres.md b/site/docs/reference/Connectors/capture-connectors/PostgreSQL/google-cloud-sql-postgres.md index cbdf3da1c5..a2b82f5a7f 100644 --- a/site/docs/reference/Connectors/capture-connectors/PostgreSQL/google-cloud-sql-postgres.md +++ b/site/docs/reference/Connectors/capture-connectors/PostgreSQL/google-cloud-sql-postgres.md @@ -139,7 +139,7 @@ store them separately. TOASTed values can sometimes present a challenge for systems that rely on the PostgreSQL write-ahead log (WAL), like this connector. If a change event occurs on a row that contains a TOASTed value, _but the TOASTed value itself is unchanged_, it is omitted from the WAL. -As a result, the connector emits a row update with the a value omitted, which might cause +As a result, the connector emits a row update with the value omitted, which might cause unexpected results in downstream catalog tasks if adjustments are not made. The PostgreSQL connector handles TOASTed values for you when you follow the [standard discovery workflow](../../../../concepts/connectors.md#flowctl-discover) diff --git a/site/docs/reference/Connectors/capture-connectors/PostgreSQL/neon-postgres.md b/site/docs/reference/Connectors/capture-connectors/PostgreSQL/neon-postgres.md index b9dc43570e..4d584ad5eb 100644 --- a/site/docs/reference/Connectors/capture-connectors/PostgreSQL/neon-postgres.md +++ b/site/docs/reference/Connectors/capture-connectors/PostgreSQL/neon-postgres.md @@ -195,7 +195,7 @@ store them separately. TOASTed values can sometimes present a challenge for systems that rely on the PostgreSQL write-ahead log (WAL), like this connector. If a change event occurs on a row that contains a TOASTed value, _but the TOASTed value itself is unchanged_, it is omitted from the WAL. -As a result, the connector emits a row update with the a value omitted, which might cause +As a result, the connector emits a row update with the value omitted, which might cause unexpected results in downstream catalog tasks if adjustments are not made. The PostgreSQL connector handles TOASTed values for you when you follow the [standard discovery workflow](../../../../concepts/connectors.md#flowctl-discover) diff --git a/site/docs/reference/Connectors/capture-connectors/alpaca.md b/site/docs/reference/Connectors/capture-connectors/alpaca.md index c7320d7417..12ffc4c9a0 100644 --- a/site/docs/reference/Connectors/capture-connectors/alpaca.md +++ b/site/docs/reference/Connectors/capture-connectors/alpaca.md @@ -24,7 +24,7 @@ See [limitations](#limitations) to learn more about reconciling historical and r ## Supported data resources -Alpaca supports over 8000 stocks and EFTs. You simply supply a list of [symbols](https://eoddata.com/symbols.aspx) to Flow when you configure the connector. +Alpaca supports over 8000 stocks and ETFs. You simply supply a list of [symbols](https://eoddata.com/symbols.aspx) to Flow when you configure the connector. To check whether Alpaca supports a symbol, you can use the [Alpaca Broker API](https://alpaca.markets/docs/api-references/broker-api/assets/#retrieving-an-asset-by-symbol). 
You can use this connector to capture data from up to 20 stock symbols into Flow collections in a single capture diff --git a/site/docs/reference/Connectors/capture-connectors/amazon-s3.md b/site/docs/reference/Connectors/capture-connectors/amazon-s3.md index a96c7981cb..567b929cd5 100644 --- a/site/docs/reference/Connectors/capture-connectors/amazon-s3.md +++ b/site/docs/reference/Connectors/capture-connectors/amazon-s3.md @@ -25,7 +25,7 @@ See the steps below to set up access. ### Setup: Public buckets -For a public buckets, the bucket access policy must allow anonymous reads on the whole bucket or a specific prefix. +For a public bucket, the bucket access policy must allow anonymous reads on the whole bucket or a specific prefix. 1. Create a bucket policy using the templates below. diff --git a/site/docs/reference/Connectors/capture-connectors/apache-kafka.md b/site/docs/reference/Connectors/capture-connectors/apache-kafka.md index a18911fec8..218a57948e 100644 --- a/site/docs/reference/Connectors/capture-connectors/apache-kafka.md +++ b/site/docs/reference/Connectors/capture-connectors/apache-kafka.md @@ -112,7 +112,7 @@ See [connectors](../../../concepts/connectors.md#using-connectors) to learn more | **`schema_registry/schema_registry_type`** | Schema Registry Type | Either `confluent_schema_registry` or `no_schema_registry`. | object | Required | | `/schema_registry/endpoint` | Schema Registry Endpoint | Schema registry API endpoint. For example: https://registry-id.us-east-2.aws.confluent.cloud. | string | | | `/schema_registry/username` | Schema Registry Username | Schema registry username to use for authentication. If you are using Confluent Cloud, this will be the 'Key' from your schema registry API key. | string | | -| `/schema_registry/password` | Schema Registry Password | Schema registry password to use for authentication. If you are using Confluent Cloud, this will be the 'Secret' from your schema registry API key.. string | | +| `/schema_registry/password` | Schema Registry Password | Schema registry password to use for authentication. If you are using Confluent Cloud, this will be the 'Secret' from your schema registry API key. | string | | | `/schema_registry/enable_json_only` | Capture Messages in JSON Format Only | If no schema registry is configured the capture will attempt to parse all data as JSON, and discovered collections will use a key of the message partition & offset. | boolean | | diff --git a/site/docs/reference/Connectors/capture-connectors/facebook-marketing.md b/site/docs/reference/Connectors/capture-connectors/facebook-marketing.md index a0696a32c9..8992f9a8d1 100644 --- a/site/docs/reference/Connectors/capture-connectors/facebook-marketing.md +++ b/site/docs/reference/Connectors/capture-connectors/facebook-marketing.md @@ -32,7 +32,7 @@ the manual method is the only supported method using the command line. ### Signing in with OAuth2 -To use OAuth2 in the Flow web app, you'll need A Facebook Business account and its [Ad Account ID](https://www.facebook.com/business/help/1492627900875762). +To use OAuth2 in the Flow web app, you'll need a Facebook Business account and its [Ad Account ID](https://www.facebook.com/business/help/1492627900875762). 
### Configuring manually with an access token diff --git a/site/docs/reference/Connectors/capture-connectors/gitlab.md b/site/docs/reference/Connectors/capture-connectors/gitlab.md index 7e284809b5..0ee3f14373 100644 --- a/site/docs/reference/Connectors/capture-connectors/gitlab.md +++ b/site/docs/reference/Connectors/capture-connectors/gitlab.md @@ -9,7 +9,7 @@ This connector is based on an open-source connector from a third party, with mod ## Supported data resources -When you [configure the connector](#endpoint), you may a list of GitLab Groups or Projects from which to capture data. +When you [configure the connector](#endpoint), you may provide a list of GitLab Groups or Projects from which to capture data. From your selection, the following data resources are captured: @@ -32,7 +32,7 @@ From your selection, the following data resources are captured: - [Releases](https://docs.gitlab.com/ee/api/releases/index.html) - [Group Labels](https://docs.gitlab.com/ee/api/group_labels.html) - [Project Labels](https://docs.gitlab.com/ee/api/labels.html) - - [Epics](https://docs.gitlab.com/ee/api/epics.html)(only available for GitLab Ultimate and GitLab.com Gold accounts) + - [Epics](https://docs.gitlab.com/ee/api/epics.html) (only available for GitLab Ultimate and GitLab.com Gold accounts) - [Epic Issues](https://docs.gitlab.com/ee/api/epic_issues.html) (only available for GitLab Ultimate and GitLab.com Gold accounts) Each resource is mapped to a Flow collection through a separate binding. @@ -43,7 +43,7 @@ There are two ways to authenticate with GitLab when capturing data into Flow: us Their prerequisites differ. OAuth is recommended for simplicity in the Flow web app; -the access token method is the only supported method using the command line. Which authentication method you choose depends on the policies of your organization. Github has special organization settings that need to be enabled in order for users to be able to access repos that are part of an organization. +the access token method is the only supported method using the command line. Which authentication method you choose depends on the policies of your organization. GitLab has special organization settings that need to be enabled in order for users to be able to access repos that are part of an organization. ### Using OAuth2 to authenticate with GitLab in the Flow web app @@ -53,7 +53,7 @@ the access token method is the only supported method using the command line. Whi * A GitLab user account with access to all entities of interest. -* A GitLab [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)). +* A GitLab [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html). ## Configuration diff --git a/site/docs/reference/Connectors/capture-connectors/google-search-console.md b/site/docs/reference/Connectors/capture-connectors/google-search-console.md index b4359dfe83..01c1eacd67 100644 --- a/site/docs/reference/Connectors/capture-connectors/google-search-console.md +++ b/site/docs/reference/Connectors/capture-connectors/google-search-console.md @@ -30,7 +30,7 @@ You add these to the [endpoint configuration](#endpoint) in the format `{"name": Each report is mapped to an additional Flow collection. :::caution -Custom reports involve an integration with Google Universal Analytics, which Google will deprecate in July 2023. +Custom reports involve an integration with Google Universal Analytics, which Google deprecated in July 2023. 
::: ## Prerequisites @@ -59,7 +59,7 @@ You'll need: Follow the steps below to meet these prerequisites: -1. Create a [service account and generate a JSON key](https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount) +1. Create a [service account and generate a JSON key](https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount). You'll copy the contents of the downloaded key file into the Service Account Credentials parameter when you configure the connector. 2. [Set up domain-wide delegation for the service account](https://developers.google.com/workspace/guides/create-credentials#optional_set_up_domain-wide_delegation_for_a_service_account). @@ -93,7 +93,7 @@ so many of these properties aren't required. | Property | Title | Description | Type | Required/Default | |---|---|---|---|---| -| **`/stream`** | Stream | Google Search Consol resource from which a collection is captured. | string | Required | +| **`/stream`** | Stream | Google Search Console resource from which a collection is captured. | string | Required | | **`/syncMode`** | Sync Mode | Connection method. | string | Required | ### Sample diff --git a/site/docs/reference/Connectors/capture-connectors/google-sheets.md b/site/docs/reference/Connectors/capture-connectors/google-sheets.md index fe77b02127..1002693bfb 100644 --- a/site/docs/reference/Connectors/capture-connectors/google-sheets.md +++ b/site/docs/reference/Connectors/capture-connectors/google-sheets.md @@ -27,10 +27,10 @@ spreadsheet: 1. The first row must be frozen and contain header names for each column. 1. If the first row is not frozen or does not contain header names, header names will be set using high-case alphabet letters (A,B,C,D...Z). -2. Sheet is not a image sheet or contains images. +2. Sheet is not an image sheet or contains images. 3. Sheet is not empty. 1. If a Sheet is empty, the connector will not break and wait for changes - inside the Sheet. When new data arrives, you will be prompted by flow to allow + inside the Sheet. When new data arrives, you will be prompted by Flow to allow for schema changes. 4. Sheet does not contain `formulaValue` inside any cell. diff --git a/site/docs/reference/Connectors/capture-connectors/harvest.md b/site/docs/reference/Connectors/capture-connectors/harvest.md index 88a02f786b..b7d84cf328 100644 --- a/site/docs/reference/Connectors/capture-connectors/harvest.md +++ b/site/docs/reference/Connectors/capture-connectors/harvest.md @@ -36,6 +36,7 @@ The following data resources are supported through the Harvest APIs: * [Uninvoiced Report](https://help.getharvest.com/api-v2/reports-api/reports/uninvoiced-report/) * [Time Reports](https://help.getharvest.com/api-v2/reports-api/reports/time-reports/) * [Project Budget Report](https://help.getharvest.com/api-v2/reports-api/reports/project-budget-report/) + By default, each resource is mapped to a Flow collection through a separate binding. ## Prerequisites @@ -55,7 +56,7 @@ See [connectors](../../../concepts/connectors.md#using-connectors) to learn more |---|---|---|---|---| | `/account_id` | Account ID | Harvest account ID. Required for all Harvest requests in pair with Personal Access Token. | string | Required | | `/start_date` | Start Date | UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. | string | Required | -| `/end_date` | End Date | UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated. 
| string | Default | +| `/end_date` | End Date | UTC date and time in the format 2021-01-25T00:00:00Z. Any data after this date will not be replicated. | string | Default | #### Bindings diff --git a/site/docs/reference/Connectors/capture-connectors/marketo.md b/site/docs/reference/Connectors/capture-connectors/marketo.md index 4168efe074..fb1f19d834 100644 --- a/site/docs/reference/Connectors/capture-connectors/marketo.md +++ b/site/docs/reference/Connectors/capture-connectors/marketo.md @@ -11,7 +11,7 @@ This connector is based on an open-source connector from a third party, with mod This connector can be used to sync the following tables from Marketo: -* **activities\_X** where X is an activity type contains information about lead activities of the type X. For example, activities\_send\_email contains information about lead activities related to the activity type `send_email`. See the [Marketo docs](https://developers.marketo.com/rest-api/endpoint-reference/lead-database-endpoint-reference/#!/Activities/getLeadActivitiesUsingGET) for a detailed explanation of what each column means. +* **activities\_X** where X is an activity type. Contains information about lead activities of the type X. For example, activities\_send\_email contains information about lead activities related to the activity type `send_email`. See the [Marketo docs](https://developers.marketo.com/rest-api/endpoint-reference/lead-database-endpoint-reference/#!/Activities/getLeadActivitiesUsingGET) for a detailed explanation of what each column means. * **activity\_types.** Contains metadata about activity types. See the [Marketo docs](https://developers.marketo.com/rest-api/endpoint-reference/lead-database-endpoint-reference/#!/Activities/getAllActivityTypesUsingGET) for a detailed explanation of columns. * **campaigns.** Contains info about your Marketo campaigns. [Marketo docs](https://developers.marketo.com/rest-api/endpoint-reference/lead-database-endpoint-reference/#!/Campaigns/getCampaignsUsingGET). * **leads.** Contains info about your Marketo leads. [Marketo docs](https://developers.marketo.com/rest-api/endpoint-reference/lead-database-endpoint-reference/#!/Leads/getLeadByIdUsingGET). diff --git a/site/docs/reference/Connectors/capture-connectors/snapchat.md b/site/docs/reference/Connectors/capture-connectors/snapchat.md index abce762c09..081c1b27ae 100644 --- a/site/docs/reference/Connectors/capture-connectors/snapchat.md +++ b/site/docs/reference/Connectors/capture-connectors/snapchat.md @@ -23,8 +23,6 @@ This connector can be used to sync the following tables from Snapchat: * AdaccountsStatsLifetime * AdsStatsHourly * AdsStatsDaily -* AdsStatsHourly -* AdsStatsDaily * AdsStatsLifetime * AdsquadsStatsDaily * AdsquadsStatsLifetime diff --git a/site/docs/reference/Connectors/capture-connectors/tiktok.md b/site/docs/reference/Connectors/capture-connectors/tiktok.md index 37af1b8085..e0c2eac63a 100644 --- a/site/docs/reference/Connectors/capture-connectors/tiktok.md +++ b/site/docs/reference/Connectors/capture-connectors/tiktok.md @@ -89,7 +89,7 @@ so many of these properties aren't required. | `/credentials/access_token` | Access Token | The long-term authorized access token. | string | | | `/end_date` | End Date | The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DD. All data generated between `start_date` and this date will be replicated. Not setting this option will result in always syncing the data till the current date. 
| string | | | `/report_granularity` | Report Aggregation Granularity | The granularity used for [aggregating performance data in reports](#report-aggregation). Choose `DAY`, `LIFETIME`, or `HOUR`.| string | | -| `/start_date` | Start Date | Replication Start Date | The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. If this parameter is not set, all data will be replicated. | string | | +| `/start_date` | Replication Start Date | The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. If this parameter is not set, all data will be replicated. | string | | #### Bindings diff --git a/site/docs/reference/Connectors/dekaf/dekaf-clickhouse.md b/site/docs/reference/Connectors/dekaf/dekaf-clickhouse.md index ce3d815fb7..0c15f914dc 100644 --- a/site/docs/reference/Connectors/dekaf/dekaf-clickhouse.md +++ b/site/docs/reference/Connectors/dekaf/dekaf-clickhouse.md @@ -17,7 +17,7 @@ array of sources supported by Estuary Flow directly into ClickHouse, using Dekaf ## Step 1: Configure Data Source in Estuary Flow -1. **Generate a [Refresh Token](Estuary Refresh Token ([Generate a refresh token](/guides/how_to_generate_refresh_token))**: +1. **Generate an [Estuary Refresh Token](/guides/how_to_generate_refresh_token)**: - To access the Kafka-compatible topics, create a refresh token in the Estuary Flow dashboard. This token will act as the password for both the broker and schema registry. diff --git a/site/docs/reference/Connectors/materialization-connectors/MySQL/amazon-rds-mysql.md b/site/docs/reference/Connectors/materialization-connectors/MySQL/amazon-rds-mysql.md index 6ecdd0adf0..5096fea088 100644 --- a/site/docs/reference/Connectors/materialization-connectors/MySQL/amazon-rds-mysql.md +++ b/site/docs/reference/Connectors/materialization-connectors/MySQL/amazon-rds-mysql.md @@ -74,7 +74,7 @@ To allow SSH tunneling to a database instance hosted on AWS, you'll need to crea 1. Refer to the [guide](/guides/connect-network/) to configure an SSH server on the cloud platform of your choice. 2. Configure your connector as described in the [configuration](#configuration) section above, - with the additional of the `networkTunnel` stanza to enable the SSH tunnel, if using. + with the addition of the `networkTunnel` stanza to enable the SSH tunnel, if using. See [Connecting to endpoints on secure networks](/concepts/connectors.md#connecting-to-endpoints-on-secure-networks) for additional details and a sample. diff --git a/site/docs/reference/Connectors/materialization-connectors/MySQL/google-cloud-sql-mysql.md b/site/docs/reference/Connectors/materialization-connectors/MySQL/google-cloud-sql-mysql.md index eab2df51e4..b8bc625f22 100644 --- a/site/docs/reference/Connectors/materialization-connectors/MySQL/google-cloud-sql-mysql.md +++ b/site/docs/reference/Connectors/materialization-connectors/MySQL/google-cloud-sql-mysql.md @@ -24,7 +24,7 @@ To use this connector, you'll need: ## Setup -### Conenecting Directly to Google Cloud SQL +### Connecting Directly to Google Cloud SQL 1. [Enable public IP on your database](https://cloud.google.com/sql/docs/mysql/configure-ip#add) and add the [Estuary Flow IP addresses](/reference/allow-ip-addresses) as authorized IP addresses. @@ -66,6 +66,8 @@ To allow SSH tunneling to a database instance hosted on Google Cloud, you must s To configure the connector, you must specify the database address in the format `host:port`. 
(You can also supply `host` only; the connector will use the port `3306` by default, which is correct in many cases.) You can find the host and port in the following locations in each platform's console: + +- Google Cloud SQL: host as Private IP Address; port is always `3306`. You may need to [configure private IP](https://cloud.google.com/sql/docs/mysql/configure-private-ip) on your database. ::: ## Configuration @@ -180,14 +182,14 @@ materializations: - When you configure your connector as described in the [configuration](#configuration) section above, including the additional `networkTunnel` configuration to enable the SSH tunnel. See [Connecting to endpoints on secure networks](/concepts/connectors.md#connecting-to-endpoints-on-secure-networks) for additional details and a sample. 2. Configure your connector as described in the [configuration](#configuration) section above, - with the additional of the `networkTunnel` stanza to enable the SSH tunnel, if using. + with the addition of the `networkTunnel` stanza to enable the SSH tunnel, if using. See [Connecting to endpoints on secure networks](/concepts/connectors.md#connecting-to-endpoints-on-secure-networks) for additional details and a sample. :::tip Configuration Tip To configure the connector, you must specify the database address in the format `host:port`. (You can also supply `host` only; the connector will use the port `3306` by default, which is correct in many cases.) -You can find the host host in the GCP console as "Private IP Address". The pport is always `3306`. You may need to [configure private IP](https://cloud.google.com/sql/docs/mysql/configure-private-ip) on your database. +You can find the host in the GCP console as "Private IP Address". The port is always `3306`. You may need to [configure private IP](https://cloud.google.com/sql/docs/mysql/configure-private-ip) on your database. ::: 3. Create the `flow_materialize` user with `All` privileges on your database. This user will need the ability to create and update the `flow_materializations` table. diff --git a/site/docs/reference/Connectors/materialization-connectors/MySQL/mysql.md b/site/docs/reference/Connectors/materialization-connectors/MySQL/mysql.md index 9162c76a82..affdbbbcba 100644 --- a/site/docs/reference/Connectors/materialization-connectors/MySQL/mysql.md +++ b/site/docs/reference/Connectors/materialization-connectors/MySQL/mysql.md @@ -160,7 +160,7 @@ There are two ways to do this: by granting direct access to Flow's IP or by crea 1. [Modify the instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html), choosing **Publicly accessible** in the **Connectivity** settings. 2. Per the [steps in the Amazon documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.RDSSecurityGroups.html#Overview.RDSSecurityGroups.Create), - create a new inbound rule and a new outbound rule that allow all traffic from the IP addresses ``. + create a new inbound rule and a new outbound rule that allow all traffic from [Estuary's IP addresses](../../../allow-ip-addresses.md). - **Connect directly with Google Cloud SQL**: [Enable public IP on your database](https://cloud.google.com/sql/docs/mysql/configure-ip#add) and add [Estuary Flow IP addresses](/reference/allow-ip-addresses) as authorized IP addresses. See the instructions below to use SSH Tunneling instead of enabling public access. @@ -169,7 +169,7 @@ There are two ways to do this: by granting direct access to Flow's IP or by crea 1. 
Refer to the [guide](/guides/connect-network/) to configure an SSH server on the cloud platform of your choice. 2. Configure your connector as described in the [configuration](#configuration) section above, - with the additional of the `networkTunnel` stanza to enable the SSH tunnel, if using. + with the addition of the `networkTunnel` stanza to enable the SSH tunnel, if using. See [Connecting to endpoints on secure networks](/concepts/connectors.md#connecting-to-endpoints-on-secure-networks) for additional details and a sample. diff --git a/site/docs/reference/Connectors/materialization-connectors/PostgreSQL/google-cloud-sql-postgres.md b/site/docs/reference/Connectors/materialization-connectors/PostgreSQL/google-cloud-sql-postgres.md index 2ecaee0b4e..5f2ea0ad3a 100644 --- a/site/docs/reference/Connectors/materialization-connectors/PostgreSQL/google-cloud-sql-postgres.md +++ b/site/docs/reference/Connectors/materialization-connectors/PostgreSQL/google-cloud-sql-postgres.md @@ -21,7 +21,7 @@ To use this connector, you'll need: You must configure your database to allow connections from Estuary. There are two ways to do this: by granting direct access to Flow's IP or by creating an SSH tunnel. -### Conenecting Directly to Google Cloud SQL +### Connecting Directly to Google Cloud SQL 1. [Enable public IP on your database](https://cloud.google.com/sql/docs/mysql/configure-ip#add) and add the [Estuary Flow IP addresses](/reference/allow-ip-addresses) as authorized IP addresses. diff --git a/site/docs/reference/Connectors/materialization-connectors/SQLServer/amazon-rds-sqlserver.md b/site/docs/reference/Connectors/materialization-connectors/SQLServer/amazon-rds-sqlserver.md index 357195afb3..e63738898e 100644 --- a/site/docs/reference/Connectors/materialization-connectors/SQLServer/amazon-rds-sqlserver.md +++ b/site/docs/reference/Connectors/materialization-connectors/SQLServer/amazon-rds-sqlserver.md @@ -53,7 +53,7 @@ GRANT CONTROL ON DATABASE:: TO flow_materialize; 1. To allow direct access: - [Modify the database](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html), setting **Public accessibility** to **Yes**. - - Edit the VPC security group associated with your database, or create a new VPC security group and associate it as described in [the Amazon documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.RDSSecurityGroups.html#Overview.RDSSecurityGroups.Create).Create a new inbound rule and a new outbound rule that allow all traffic from the [Estuary Flow IP addresses](/reference/allow-ip-addresses). + - Edit the VPC security group associated with your database, or create a new VPC security group and associate it as described in [the Amazon documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.RDSSecurityGroups.html#Overview.RDSSecurityGroups.Create). Create a new inbound rule and a new outbound rule that allow all traffic from the [Estuary Flow IP addresses](/reference/allow-ip-addresses). 2. 
To allow secure connections via SSH tunneling: - Follow the guide to [configure an SSH server for tunneling](../../../../../guides/connect-network/) diff --git a/site/docs/reference/Connectors/materialization-connectors/alloydb.md b/site/docs/reference/Connectors/materialization-connectors/alloydb.md index 85aee1a444..3fb6616db5 100644 --- a/site/docs/reference/Connectors/materialization-connectors/alloydb.md +++ b/site/docs/reference/Connectors/materialization-connectors/alloydb.md @@ -22,7 +22,7 @@ in the same Google Cloud project as your instance. ## Configuration To use this connector, begin with data in one or more Flow collections. -Use the below properties to configure a AlloyDB materialization, which will direct one or more of your Flow collections to your desired tables, or views, in the database. +Use the below properties to configure an AlloyDB materialization, which will direct one or more of your Flow collections to your desired tables, or views, in the database. ### Properties @@ -97,7 +97,7 @@ materializations: PostgreSQL has a list of reserved words that must be quoted in order to be used as an identifier. Flow considers all the reserved words that are marked as "reserved" in any of the columns in the official [PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-keywords-appendix.html). -These reserve words are listed in the table below. Flow automatically quotes fields that are in this list. +These reserved words are listed in the table below. Flow automatically quotes fields that are in this list. |Reserved words| | | | | |---|---|---|---|---| diff --git a/site/docs/reference/Connectors/materialization-connectors/slack.md b/site/docs/reference/Connectors/materialization-connectors/slack.md index d0d467cca5..386e993f91 100644 --- a/site/docs/reference/Connectors/materialization-connectors/slack.md +++ b/site/docs/reference/Connectors/materialization-connectors/slack.md @@ -20,18 +20,18 @@ The Slack connector is available for use in the Flow web application. To learn m | Property | Title | Description | Type | Required/Default | | --- | --- | --- | --- | --- | -| /access_token | Access Token | The Slack API access token for authentication. | string | Required | -| /client_id | Client ID | Client ID for authentication. | string | Required | -| /client_secret | Client Secret | The Slack API client secret. | string | Required | +| `/access_token` | Access Token | The Slack API access token for authentication. | string | Required | +| `/client_id` | Client ID | Client ID for authentication. | string | Required | +| `/client_secret` | Client Secret | The Slack API client secret. | string | Required | #### Bindings | Property | Title | Description | Type | Required/Default | | --- | --- | --- | --- | --- | -| /source | Source | Source data in Flow to be sent to Slack. | string | Required | -| /channel | Channel | The ID of the Slack channel to send messages to. | string | Required | -| /display_name | Display Name | The display name for the sender in Slack. | string | | -| /logo_emoji | Logo Emoji | The emoji to be used. | string | | +| `/source` | Source | Source data in Flow to be sent to Slack. | string | Required | +| `/channel` | Channel | The ID of the Slack channel to send messages to. | string | Required | +| `/display_name` | Display Name | The display name for the sender in Slack. | string | | +| `/logo_emoji` | Logo Emoji | The emoji to be used. 
| string | | ### Sample diff --git a/site/docs/reference/Connectors/materialization-connectors/starburst.md b/site/docs/reference/Connectors/materialization-connectors/starburst.md index eee6d4f7a8..1b58305ffb 100644 --- a/site/docs/reference/Connectors/materialization-connectors/starburst.md +++ b/site/docs/reference/Connectors/materialization-connectors/starburst.md @@ -1,9 +1,9 @@ # Starburst -This connector materializes transactionally Flow collections into Iceberg or Delta Lake tables using AWS S3 storage in [Starburst Galaxy](https://www.starburst.io/platform/starburst-galaxy/). -Starburst Galaxy connector supports only standard(merge) updates. +This connector transactionally materializes Flow collections into Iceberg or Delta Lake tables using AWS S3 storage in [Starburst Galaxy](https://www.starburst.io/platform/starburst-galaxy/). +The Starburst Galaxy connector supports only standard (merge) updates. -The connector makes use of S3 AWS storage for storing temporarily data during the materialization process. +The connector makes use of AWS S3 storage for temporarily storing data during the materialization process. [`ghcr.io/estuary/materialize-starburst:dev`](https://ghcr.io/estuary/materialize-starburst:dev) provides the latest connector image. You can also follow the link in your browser to see past image versions. @@ -11,18 +11,18 @@ To use this connector, you'll need: -* A Starburst Galaxy account (To create one: [Staburst Galaxy start](https://www.starburst.io/platform/starburst-galaxy/start/) that includes: +* A Starburst Galaxy account (To create one, see [Starburst Galaxy start](https://www.starburst.io/platform/starburst-galaxy/start/)) that includes: * A running cluster containing an [Amazon S3](https://docs.starburst.io/starburst-galaxy/working-with-data/create-catalogs/object-storage/s3.html) catalog * A [schema](https://docs.starburst.io/starburst-galaxy/data-engineering/working-with-data-lakes/table-formats/index.html#create-schema) which is a logical grouping of tables - * Storage on S3 for temporary data with `awsAccessKeyId` and `awsSecretAccessKey` which should correspond to used catalog - * A user with a role assigned that grants access to create, modify, drop tables in specified Amazon S3 catalog + * Storage on S3 for temporary data with `awsAccessKeyId` and `awsSecretAccessKey` which should correspond to the chosen catalog + * A user with a role assigned that grants access to create, modify, and drop tables in the specified Amazon S3 catalog * At least one Flow collection ### Setup -To get host go to your Cluster -> Connection info -> Other clients ([Connect clients](https://docs.starburst.io/starburst-galaxy/working-with-data/query-data/connect-clients.html)) +To get the host, go to your Cluster -> Connection info -> Other clients, as specified in Starburst's [Connect clients](https://docs.starburst.io/starburst-galaxy/working-with-data/query-data/connect-clients.html) docs. -There is also need to grant access to temporary storage (Roles and privileges -> Select specific role -> Privileges -> Add privilege -> Location). "Create schema and table in location" should be selected. 
[Doc](https://docs.starburst.io/starburst-galaxy/cluster-administration/manage-cluster-access/manage-users-roles-and-tags/account-and-cluster-privileges-and-entities.html#location-privileges-) +You will also need to grant access to temporary storage (Roles and privileges -> Select specific role -> Privileges -> Add privilege -> Location). "Create schema and table in location" should be selected. See the Starburst [docs](https://docs.starburst.io/starburst-galaxy/cluster-administration/manage-cluster-access/manage-users-roles-and-tags/account-and-cluster-privileges-and-entities.html#location-privileges-) for more. ## Configuration diff --git a/site/docs/reference/Connectors/materialization-connectors/timescaledb.md b/site/docs/reference/Connectors/materialization-connectors/timescaledb.md index 0d09abe00b..363ecc8a67 100644 --- a/site/docs/reference/Connectors/materialization-connectors/timescaledb.md +++ b/site/docs/reference/Connectors/materialization-connectors/timescaledb.md @@ -103,7 +103,7 @@ The default is to use standard updates. PostgreSQL (and thus TimescaleDB) has a list of reserved words that must be quoted in order to be used as an identifier. Flow considers all the reserved words that are marked as "reserved" in any of the columns in the official [PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-keywords-appendix.html). -These reserve words are listed in the table below. Flow automatically quotes fields that are in this list. +These reserved words are listed in the table below. Flow automatically quotes fields that are in this list. |Reserved words| | | | | |---|---|---|---|---| diff --git a/site/docs/reference/editing.md b/site/docs/reference/editing.md index 5e57d753be..2645278b5a 100644 --- a/site/docs/reference/editing.md +++ b/site/docs/reference/editing.md @@ -24,7 +24,7 @@ You do this by pulling the desired specification locally, editing, and re-publis ## Endpoint configuration changes -A common reason to edit a capture or materialization to fix a broken endpoint configuration: +A common reason to edit a capture or materialization is to fix a broken endpoint configuration: for example, if a database is now accessed through a different port. Changes that prevent Flow from finding the source system immediately cause the capture or materialization to fail. @@ -64,5 +64,5 @@ It *is* possible to manually change the names of destination resources (tables o You should avoid doing so unless you want to route future data to a new location. If you do this, a new resource with that name will be created and the old resource will continue to exist. -Historical data will may not be backfilled into the new resource, depending on the connector used. +Historical data may not be backfilled into the new resource, depending on the connector used. diff --git a/site/docs/reference/materialization-sync-schedule.md b/site/docs/reference/materialization-sync-schedule.md index 809a54fd33..bcdc216c3f 100644 --- a/site/docs/reference/materialization-sync-schedule.md +++ b/site/docs/reference/materialization-sync-schedule.md @@ -7,7 +7,7 @@ could reduce your compute charges by running a single large query every 30 minutes rather than many smaller queries every few seconds. :::note -Syncing data less frequently to your destination system does _not_ effect the +Syncing data less frequently to your destination system does _not_ affect the cost for running the materialization connector within Estuary Flow. 
But it can reduce the costs incurred in the destination from the actions the connector takes to load data to it. @@ -80,7 +80,7 @@ disable and then re-enable the materialization. To use the same schedule for syncing data 24/7, set the value of **Sync Frequency** only and leave the other inputs empty. For example, you might set a -**Sync Frequency** of `15m` to always have you destination sync every 15 minutes +**Sync Frequency** of `15m` to always have your destination sync every 15 minutes instead of the default 30 minutes. :::tip @@ -133,7 +133,7 @@ Here are some examples of valid inputs for **Fast Sync Enabled Days**: ## Timing of syncs In technical terms, timing of syncs is controlled by the materialization -connector sending a transaction acknowledgement to the Flow runtime a computed +connector sending a transaction acknowledgement to the Flow runtime at computed times. Practically this means that at these times the prior transaction will complete and have its statistics recorded, and the next transaction will begin. @@ -156,10 +156,10 @@ acknowledgements will occur. :::info The `jitter` value is deterministic based on the *compute resource* for the destination system from the materialization's endpoint configuration. How this -compute resource is identified various for different systems, but is usually +compute resource is identified varies for different systems, but is usually something like `"account_name" + "warehouse_Name"`. -This means that separate materialization use the same compute resource will +This means that separate materializations using the same compute resource will synchronize their usage of that compute resource if they have the same **Sync Schedule** configured. ::: diff --git a/site/docs/reference/notifications.md b/site/docs/reference/notifications.md index 2e42de7580..cc3f8dcba7 100644 --- a/site/docs/reference/notifications.md +++ b/site/docs/reference/notifications.md @@ -10,7 +10,7 @@ In the `Admin` section of the Flow Web Application, navigate to the the `Setting ## Data Movement Alerts -When navigating to the main view of a capture or a materialization, a user can select an interval for tracking zero data movement. Under the `Nofitification Settiings` card, select a time interval from the dropdown labeled `Interval`. There is no need to save, but you must also have already configured notifications in order for the alert to take into effect. If you are not yet subscribed to notifications, a pop up will appear prompting you to set up a subscribing by clicking on `CLICK HERE`. +When navigating to the main view of a capture or a materialization, a user can select an interval for tracking zero data movement. Under the `Notification Settings` card, select a time interval from the dropdown labeled `Interval`. There is no need to save, but you must also have already configured notifications in order for the alert to take effect. If you are not yet subscribed to notifications, an info box will appear prompting you to set up a subscription by clicking on `CLICK HERE`. If your task does not receive any new documents with the selected timeframe, an email will be sent to any email addresses that are subscribed to this tenant. @@ -18,7 +18,7 @@ If your task does not receive any new documents with the selected timeframe, an Billing alerts are automatically subscribed to when a user inputs their email into the `Organization Notifications` table. 
Alerts will be sent out for the following events: -* **Free Tier Started**: A tenenat has transitioned into the free trial +* **Free Tier Started**: A tenant has transitioned into the free trial * **Free Trial Ending**: Five days are remaining in a tenant's free trial * **Free Trial Ended**: A tenant's free trial has ended * **Provided Payment Method**: A valid payment method has been provided for a tenant