Specifies the data of the Amazon Reshift target node.
+ * Specifies the data of the Amazon Redshift target node.
*/
Data?: AmazonRedshiftNodeData;
@@ -5304,6 +5304,175 @@ export interface SelectFromCollection {
Index: number | undefined;
}
+/**
+ * @public
+ * Specifies configuration for Snowflake nodes in Glue Studio.
+ */
+export interface SnowflakeNodeData {
+ /**
+ * Specifies how retrieved data is specified. Valid values: "table",
+ * "query".
+ */
+ SourceType?: string;
+
+ /**
+ * Specifies a Glue Data Catalog Connection to a Snowflake endpoint.
+ */
+ Connection?: Option;
+
+ /**
+ * Specifies a Snowflake database schema for your node to use.
+ */
+ Schema?: string;
+
+ /**
+ * Specifies a Snowflake table for your node to use.
+ */
+ Table?: string;
+
+ /**
+ * Specifies a Snowflake database for your node to use.
+ */
+ Database?: string;
+
+ /**
+ * Not currently used.
+ */
+ TempDir?: string;
+
+ /**
+ * Not currently used.
+ */
+ IamRole?: Option;
+
+ /**
+ * Specifies additional options passed to the Snowflake connector. If options are specified
+ * elsewhere in this node, this will take precedence.
+ */
+ AdditionalOptions?: Record;
+
+ /**
+ * A SQL string used to retrieve data with the query sourcetype.
+ */
+ SampleQuery?: string;
+
+ /**
+ * A SQL string run before the Snowflake connector performs its standard actions.
+ */
+ PreAction?: string;
+
+ /**
+ * A SQL string run after the Snowflake connector performs its standard actions.
+ */
+ PostAction?: string;
+
+ /**
+ * Specifies what action to take when writing to a table with preexisting data. Valid values:
+ * append, merge, truncate, drop.
+ */
+ Action?: string;
+
+ /**
+ * Used when Action is append. Specifies the resolution behavior when a row
+ * already exists. If true, preexisting rows will be updated. If false, those rows will be inserted.
+ */
+ Upsert?: boolean;
+
+ /**
+ * Specifies a merge action. Valid values: simple, custom. If
+ * simple, merge behavior is defined by MergeWhenMatched and
+ * MergeWhenNotMatched. If custom, defined by MergeClause.
+ */
+ MergeAction?: string;
+
+ /**
+ * Specifies how to resolve records that match preexisting data when merging. Valid values:
+ * update, delete.
+ */
+ MergeWhenMatched?: string;
+
+ /**
+ * Specifies how to process records that do not match preexisting data when merging. Valid
+ * values: insert, none.
+ */
+ MergeWhenNotMatched?: string;
+
+ /**
+ * A SQL statement that specifies a custom merge behavior.
+ */
+ MergeClause?: string;
+
+ /**
+ * The name of a staging table used when performing merge or upsert append
+ * actions. Data is written to this table, then moved to table by a generated
+ * postaction.
+ */
+ StagingTable?: string;
+
+ /**
+ * Specifies the columns combined to identify a record when detecting matches for merges and
+ * upserts. A list of structures with value, label and
+ * description keys. Each structure describes a column.
+ */
+ SelectedColumns?: Option[];
+
+ /**
+ * Specifies whether automatic query pushdown is enabled. If pushdown
+ * is enabled, then when a query is run on Spark, if part of the query can be "pushed down" to
+ * the
+ * Snowflake server, it is pushed down. This improves performance of some queries.
+ */
+ AutoPushdown?: boolean;
+
+ /**
+ * Manually defines the target schema for the node. A list of structures with
+ * value, label and description keys. Each structure defines a column.
+ */
+ TableSchema?: Option[];
+}
+
+/**
+ * @public
+ * Specifies a Snowflake data source.
+ */
+export interface SnowflakeSource {
+ /**
+ * The name of the Snowflake data source.
+ */
+ Name: string | undefined;
+
+ /**
+ * Configuration for the Snowflake data source.
+ */
+ Data: SnowflakeNodeData | undefined;
+
+ /**
+ * Specifies user-defined schemas for your output data.
+ */
+ OutputSchemas?: GlueSchema[];
+}
+
+/**
+ * @public
+ * Specifies a Snowflake target.
+ */
+export interface SnowflakeTarget {
+ /**
+ * The name of the Snowflake target.
+ */
+ Name: string | undefined;
+
+ /**
+ * Specifies the data of the Snowflake target node.
+ */
+ Data: SnowflakeNodeData | undefined;
+
+ /**
+ * The nodes that are inputs to the data target.
+ */
+ Inputs?: string[];
+}
+
/**
* @public
* Specifies a connector to an Apache Spark data source.
@@ -8182,66 +8351,3 @@ export class ValidationException extends __BaseException {
this.Message = opts.Message;
}
}
-
-/**
- * @public
- */
-export interface CreateJobResponse {
- /**
- * The unique name that was provided for this job definition.
- */
- Name?: string;
-}
-
-/**
- * @public
- * The parameters to configure the find matches transform.
- */
-export interface FindMatchesParameters {
- /**
- * The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
- */
- PrimaryKeyColumnName?: string;
-
- /**
- * The value selected when tuning your transform for a balance between precision and recall.
- * A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a
- * value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0
- * means very low recall, and choosing values close to 0.0 results in very low precision.
- * The precision metric indicates how often your model is correct when it predicts a match.
- * The recall metric indicates that for an actual match, how often your model predicts the
- * match.
- */
- PrecisionRecallTradeoff?: number;
-
- /**
- * The value that is selected when tuning your transform for a balance between accuracy and
- * cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0
- * means a bias purely for accuracy, which typically results in a higher cost, sometimes
- * substantially higher. A value of 0.0 means a bias purely for cost, which results in a less
- * accurate FindMatches transform, sometimes with unacceptable accuracy.
- * Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
- * Cost measures how many compute resources, and thus money, are consumed to run the
- * transform.
- */
- AccuracyCostTradeoff?: number;
-
- /**
- * The value to switch on or off to force the output to match the provided labels from users. If the value is True, the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False, the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
- * Note that setting this value to true may increase the conflation execution time.
- */
- EnforceProvidedLabels?: boolean;
-}
-
-/**
- * @public
- * @enum
- */
-export const TransformType = {
- FIND_MATCHES: "FIND_MATCHES",
-} as const;
-
-/**
- * @public
- */
-export type TransformType = (typeof TransformType)[keyof typeof TransformType];
diff --git a/clients/client-glue/src/models/models_1.ts b/clients/client-glue/src/models/models_1.ts
index ca6e1dbd4e399..287361dd2802f 100644
--- a/clients/client-glue/src/models/models_1.ts
+++ b/clients/client-glue/src/models/models_1.ts
@@ -20,7 +20,6 @@ import {
ErrorDetail,
EventBatchingCondition,
FederatedDatabase,
- FindMatchesParameters,
GlueTable,
JobRun,
Partition,
@@ -32,12 +31,74 @@ import {
SchemaId,
StorageDescriptor,
TaskStatusType,
- TransformType,
Trigger,
TriggerType,
WorkerType,
} from "./models_0";
+/**
+ * @public
+ */
+export interface CreateJobResponse {
+ /**
+ * The unique name that was provided for this job definition.
+ */
+ Name?: string;
+}
+
+/**
+ * @public
+ * The parameters to configure the find matches transform.
+ */
+export interface FindMatchesParameters {
+ /**
+ * The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
+ */
+ PrimaryKeyColumnName?: string;
+
+ /**
+ * The value selected when tuning your transform for a balance between precision and recall.
+ * A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a
+ * value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0
+ * means very low recall, and choosing values close to 0.0 results in very low precision.
+ * The precision metric indicates how often your model is correct when it predicts a match.
+ * The recall metric indicates that for an actual match, how often your model predicts the
+ * match.
+ */
+ PrecisionRecallTradeoff?: number;
+
+ /**
+ * The value that is selected when tuning your transform for a balance between accuracy and
+ * cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0
+ * means a bias purely for accuracy, which typically results in a higher cost, sometimes
+ * substantially higher. A value of 0.0 means a bias purely for cost, which results in a less
+ * accurate FindMatches transform, sometimes with unacceptable accuracy.
+ * Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
+ * Cost measures how many compute resources, and thus money, are consumed to run the
+ * transform.
+ */
+ AccuracyCostTradeoff?: number;
+
+ /**
+ * The value to switch on or off to force the output to match the provided labels from users. If the value is True, the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False, the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
+ * Note that setting this value to true may increase the conflation execution time.
+ */
+ EnforceProvidedLabels?: boolean;
+}
+
+/**
+ * @public
+ * @enum
+ */
+export const TransformType = {
+ FIND_MATCHES: "FIND_MATCHES",
+} as const;
+
+/**
+ * @public
+ */
+export type TransformType = (typeof TransformType)[keyof typeof TransformType];
+
/**
* @public
* The algorithm-specific parameters that are associated with the machine learning
@@ -7011,201 +7072,3 @@ export interface GetUnfilteredPartitionMetadataRequest {
*/
SupportedPermissionTypes: (PermissionType | string)[] | undefined;
}
-
-/**
- * @public
- */
-export interface GetUnfilteredPartitionMetadataResponse {
- /**
- * A Partition object containing the partition metadata.
- */
- Partition?: Partition;
-
- /**
- * A list of column names that the user has been granted access to.
- */
- AuthorizedColumns?: string[];
-
- /**
- * A Boolean value that indicates whether the partition location is registered
- * with Lake Formation.
- */
- IsRegisteredWithLakeFormation?: boolean;
-}
-
-/**
- * @public
- * The operation timed out.
- */
-export class PermissionTypeMismatchException extends __BaseException {
- readonly name: "PermissionTypeMismatchException" = "PermissionTypeMismatchException";
- readonly $fault: "client" = "client";
- /**
- * There is a mismatch between the SupportedPermissionType used in the query request
- * and the permissions defined on the target table.
- */
- Message?: string;
- /**
- * @internal
- */
- constructor(opts: __ExceptionOptionType) {
- super({
- name: "PermissionTypeMismatchException",
- $fault: "client",
- ...opts,
- });
- Object.setPrototypeOf(this, PermissionTypeMismatchException.prototype);
- this.Message = opts.Message;
- }
-}
-
-/**
- * @public
- */
-export interface GetUnfilteredPartitionsMetadataRequest {
- /**
- * The ID of the Data Catalog where the partitions in question reside. If none is provided,
- * the AWS account ID is used by default.
- */
- CatalogId: string | undefined;
-
- /**
- * The name of the catalog database where the partitions reside.
- */
- DatabaseName: string | undefined;
-
- /**
- * The name of the table that contains the partition.
- */
- TableName: string | undefined;
-
- /**
- * An expression that filters the partitions to be returned.
- * The expression uses SQL syntax similar to the SQL WHERE
filter clause. The
- * SQL statement parser JSQLParser parses the expression.
- *
- * Operators: The following are the operators that you can use in the
- * Expression
API call:
- *
- * - =
- * -
- *
Checks whether the values of the two operands are equal; if yes, then the condition becomes
- * true.
- * Example: Assume 'variable a' holds 10 and 'variable b' holds 20.
- * (a = b) is not true.
- *
- * - < >
- * -
- *
Checks whether the values of two operands are equal; if the values are not equal,
- * then the condition becomes true.
- * Example: (a < > b) is true.
- *
- * - >
- * -
- *
Checks whether the value of the left operand is greater than the value of the right
- * operand; if yes, then the condition becomes true.
- * Example: (a > b) is not true.
- *
- * - <
- * -
- *
Checks whether the value of the left operand is less than the value of the right
- * operand; if yes, then the condition becomes true.
- * Example: (a < b) is true.
- *
- * - >=
- * -
- *
Checks whether the value of the left operand is greater than or equal to the value
- * of the right operand; if yes, then the condition becomes true.
- * Example: (a >= b) is not true.
- *
- * - <=
- * -
- *
Checks whether the value of the left operand is less than or equal to the value of
- * the right operand; if yes, then the condition becomes true.
- * Example: (a <= b) is true.
- *
- * - AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL
- * -
- *
Logical operators.
- *
- *
- *
- * Supported Partition Key Types: The following are the supported
- * partition keys.
- *
- * -
- *
- * string
- *
- *
- * -
- *
- * date
- *
- *
- * -
- *
- * timestamp
- *
- *
- * -
- *
- * int
- *
- *
- * -
- *
- * bigint
- *
- *
- * -
- *
- * long
- *
- *
- * -
- *
- * tinyint
- *
- *
- * -
- *
- * smallint
- *
- *
- * -
- *
- * decimal
- *
- *
- *
- * If an type is encountered that is not valid, an exception is thrown.
- */
- Expression?: string;
-
- /**
- * A structure containing Lake Formation audit context information.
- */
- AuditContext?: AuditContext;
-
- /**
- * A list of supported permission types.
- */
- SupportedPermissionTypes: (PermissionType | string)[] | undefined;
-
- /**
- * A continuation token, if this is not the first call to retrieve
- * these partitions.
- */
- NextToken?: string;
-
- /**
- * The segment of the table's partitions to scan in this request.
- */
- Segment?: Segment;
-
- /**
- * The maximum number of partitions to return in a single response.
- */
- MaxResults?: number;
-}
diff --git a/clients/client-glue/src/models/models_2.ts b/clients/client-glue/src/models/models_2.ts
index abd0061efb6f4..c2c0b231c75db 100644
--- a/clients/client-glue/src/models/models_2.ts
+++ b/clients/client-glue/src/models/models_2.ts
@@ -87,6 +87,8 @@ import {
SchemaId,
SelectFields,
SelectFromCollection,
+ SnowflakeSource,
+ SnowflakeTarget,
SourceControlAuthStrategy,
SourceControlDetails,
SourceControlProvider,
@@ -117,6 +119,7 @@ import {
SchemaStatus,
SchemaVersionNumber,
SchemaVersionStatus,
+ Segment,
Session,
Statement,
Table,
@@ -127,6 +130,204 @@ import {
UserDefinedFunctionInput,
} from "./models_1";
+/**
+ * @public
+ */
+export interface GetUnfilteredPartitionMetadataResponse {
+ /**
+ * A Partition object containing the partition metadata.
+ */
+ Partition?: Partition;
+
+ /**
+ * A list of column names that the user has been granted access to.
+ */
+ AuthorizedColumns?: string[];
+
+ /**
+ * A Boolean value that indicates whether the partition location is registered
+ * with Lake Formation.
+ */
+ IsRegisteredWithLakeFormation?: boolean;
+}
+
+/**
+ * @public
+ * The operation timed out.
+ */
+export class PermissionTypeMismatchException extends __BaseException {
+ readonly name: "PermissionTypeMismatchException" = "PermissionTypeMismatchException";
+ readonly $fault: "client" = "client";
+ /**
+ * There is a mismatch between the SupportedPermissionType used in the query request
+ * and the permissions defined on the target table.
+ */
+ Message?: string;
+ /**
+ * @internal
+ */
+ constructor(opts: __ExceptionOptionType) {
+ super({
+ name: "PermissionTypeMismatchException",
+ $fault: "client",
+ ...opts,
+ });
+ Object.setPrototypeOf(this, PermissionTypeMismatchException.prototype);
+ this.Message = opts.Message;
+ }
+}
+
+/**
+ * @public
+ */
+export interface GetUnfilteredPartitionsMetadataRequest {
+ /**
+ * The ID of the Data Catalog where the partitions in question reside. If none is provided,
+ * the AWS account ID is used by default.
+ */
+ CatalogId: string | undefined;
+
+ /**
+ * The name of the catalog database where the partitions reside.
+ */
+ DatabaseName: string | undefined;
+
+ /**
+ * The name of the table that contains the partition.
+ */
+ TableName: string | undefined;
+
+ /**
+ * An expression that filters the partitions to be returned.
+ * The expression uses SQL syntax similar to the SQL WHERE
filter clause. The
+ * SQL statement parser JSQLParser parses the expression.
+ *
+ * Operators: The following are the operators that you can use in the
+ * Expression
API call:
+ *
+ * - =
+ * -
+ *
Checks whether the values of the two operands are equal; if yes, then the condition becomes
+ * true.
+ * Example: Assume 'variable a' holds 10 and 'variable b' holds 20.
+ * (a = b) is not true.
+ *
+ * - < >
+ * -
+ *
Checks whether the values of two operands are equal; if the values are not equal,
+ * then the condition becomes true.
+ * Example: (a < > b) is true.
+ *
+ * - >
+ * -
+ *
Checks whether the value of the left operand is greater than the value of the right
+ * operand; if yes, then the condition becomes true.
+ * Example: (a > b) is not true.
+ *
+ * - <
+ * -
+ *
Checks whether the value of the left operand is less than the value of the right
+ * operand; if yes, then the condition becomes true.
+ * Example: (a < b) is true.
+ *
+ * - >=
+ * -
+ *
Checks whether the value of the left operand is greater than or equal to the value
+ * of the right operand; if yes, then the condition becomes true.
+ * Example: (a >= b) is not true.
+ *
+ * - <=
+ * -
+ *
Checks whether the value of the left operand is less than or equal to the value of
+ * the right operand; if yes, then the condition becomes true.
+ * Example: (a <= b) is true.
+ *
+ * - AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL
+ * -
+ *
Logical operators.
+ *
+ *
+ *
+ * Supported Partition Key Types: The following are the supported
+ * partition keys.
+ *
+ * -
+ *
+ * string
+ *
+ *
+ * -
+ *
+ * date
+ *
+ *
+ * -
+ *
+ * timestamp
+ *
+ *
+ * -
+ *
+ * int
+ *
+ *
+ * -
+ *
+ * bigint
+ *
+ *
+ * -
+ *
+ * long
+ *
+ *
+ * -
+ *
+ * tinyint
+ *
+ *
+ * -
+ *
+ * smallint
+ *
+ *
+ * -
+ *
+ * decimal
+ *
+ *
+ *
+ * If an type is encountered that is not valid, an exception is thrown.
+ */
+ Expression?: string;
+
+ /**
+ * A structure containing Lake Formation audit context information.
+ */
+ AuditContext?: AuditContext;
+
+ /**
+ * A list of supported permission types.
+ */
+ SupportedPermissionTypes: (PermissionType | string)[] | undefined;
+
+ /**
+ * A continuation token, if this is not the first call to retrieve
+ * these partitions.
+ */
+ NextToken?: string;
+
+ /**
+ * The segment of the table's partitions to scan in this request.
+ */
+ Segment?: Segment;
+
+ /**
+ * The maximum number of partitions to return in a single response.
+ */
+ MaxResults?: number;
+}
+
/**
* @public
* A partition that contains unfiltered metadata.
@@ -4682,6 +4883,16 @@ export interface CodeGenConfigurationNode {
* Specifies a Glue DataBrew recipe node.
*/
Recipe?: Recipe;
+
+ /**
+ * Specifies a Snowflake data source.
+ */
+ SnowflakeSource?: SnowflakeSource;
+
+ /**
+ * Specifies a target that writes to a Snowflake data source.
+ */
+ SnowflakeTarget?: SnowflakeTarget;
}
/**
diff --git a/clients/client-glue/src/protocols/Aws_json1_1.ts b/clients/client-glue/src/protocols/Aws_json1_1.ts
index 64b659e8394f2..730ef0b062e16 100644
--- a/clients/client-glue/src/protocols/Aws_json1_1.ts
+++ b/clients/client-glue/src/protocols/Aws_json1_1.ts
@@ -553,7 +553,6 @@ import {
Filter,
FilterExpression,
FilterValue,
- FindMatchesParameters,
GlueEncryptionException,
GlueRecordType,
GlueSchema,
@@ -643,6 +642,9 @@ import {
SelectFromCollection,
SerDeInfo,
SkewedInfo,
+ SnowflakeNodeData,
+ SnowflakeSource,
+ SnowflakeTarget,
SourceControlDetails,
SparkConnectorSource,
SparkConnectorTarget,
@@ -731,6 +733,7 @@ import {
EncryptionConfiguration,
EvaluationMetrics,
FindMatchesMetrics,
+ FindMatchesParameters,
GetBlueprintRequest,
GetBlueprintResponse,
GetBlueprintRunRequest,
@@ -828,8 +831,6 @@ import {
GetTriggerRequest,
GetTriggersRequest,
GetUnfilteredPartitionMetadataRequest,
- GetUnfilteredPartitionMetadataResponse,
- GetUnfilteredPartitionsMetadataRequest,
GluePolicy,
GrokClassifier,
IcebergInput,
@@ -843,7 +844,6 @@ import {
OpenTableFormatInput,
PartitionIndex,
PermissionType,
- PermissionTypeMismatchException,
RegistryId,
ResourceUri,
S3Encryption,
@@ -892,6 +892,8 @@ import {
DevEndpointCustomLibraries,
GetJobResponse,
GetJobsResponse,
+ GetUnfilteredPartitionMetadataResponse,
+ GetUnfilteredPartitionsMetadataRequest,
GetUnfilteredPartitionsMetadataResponse,
GetUnfilteredTableMetadataRequest,
GetUnfilteredTableMetadataResponse,
@@ -940,6 +942,7 @@ import {
MetadataKeyValuePair,
MLTransformNotReadyException,
NoScheduleException,
+ PermissionTypeMismatchException,
PropertyPredicate,
PutDataCatalogEncryptionSettingsRequest,
PutResourcePolicyRequest,
@@ -15875,6 +15878,8 @@ const se_CodeGenConfigurationNode = (input: CodeGenConfigurationNode, context: _
S3ParquetSource: _json,
SelectFields: _json,
SelectFromCollection: _json,
+ SnowflakeSource: _json,
+ SnowflakeTarget: _json,
SparkConnectorSource: _json,
SparkConnectorTarget: _json,
SparkSQL: _json,
@@ -17161,6 +17166,12 @@ const se_PIIDetection = (input: PIIDetection, context: __SerdeContext): any => {
// se_SkewedInfo omitted.
+// se_SnowflakeNodeData omitted.
+
+// se_SnowflakeSource omitted.
+
+// se_SnowflakeTarget omitted.
+
// se_SortCriteria omitted.
// se_SortCriterion omitted.
@@ -17907,6 +17918,8 @@ const de_CodeGenConfigurationNode = (output: any, context: __SerdeContext): Code
S3ParquetSource: _json,
SelectFields: _json,
SelectFromCollection: _json,
+ SnowflakeSource: _json,
+ SnowflakeTarget: _json,
SparkConnectorSource: _json,
SparkConnectorTarget: _json,
SparkSQL: _json,
@@ -20373,6 +20386,12 @@ const de_SessionList = (output: any, context: __SerdeContext): Session[] => {
// de_SkewedInfo omitted.
+// de_SnowflakeNodeData omitted.
+
+// de_SnowflakeSource omitted.
+
+// de_SnowflakeTarget omitted.
+
// de_SourceControlDetails omitted.
// de_SparkConnectorSource omitted.
diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json
index daf0a51fcb8b2..fc3ca241335ea 100644
--- a/codegen/sdk-codegen/aws-models/glue.json
+++ b/codegen/sdk-codegen/aws-models/glue.json
@@ -2164,7 +2164,7 @@
"Data": {
"target": "com.amazonaws.glue#AmazonRedshiftNodeData",
"traits": {
- "smithy.api#documentation": "Specifies the data of the Amazon Reshift target node.
"
+ "smithy.api#documentation": "Specifies the data of the Amazon Redshift target node.
"
}
},
"Inputs": {
@@ -5288,6 +5288,18 @@
"traits": {
"smithy.api#documentation": "Specifies a Glue DataBrew recipe node.
"
}
+ },
+ "SnowflakeSource": {
+ "target": "com.amazonaws.glue#SnowflakeSource",
+ "traits": {
+ "smithy.api#documentation": "Specifies a Snowflake data source.
"
+ }
+ },
+ "SnowflakeTarget": {
+ "target": "com.amazonaws.glue#SnowflakeTarget",
+ "traits": {
+ "smithy.api#documentation": "Specifies a target that writes to a Snowflake data source.
"
+ }
}
},
"traits": {
@@ -28880,6 +28892,198 @@
"smithy.api#documentation": "Specifies skewed values in a table. Skewed values are those that occur with very high\n frequency.
"
}
},
+ "com.amazonaws.glue#SnowflakeNodeData": {
+ "type": "structure",
+ "members": {
+ "SourceType": {
+ "target": "com.amazonaws.glue#GenericLimitedString",
+ "traits": {
+ "smithy.api#documentation": "Specifies how retrieved data is specified. Valid values: \"table\"
, \n \"query\"
.
"
+ }
+ },
+ "Connection": {
+ "target": "com.amazonaws.glue#Option",
+ "traits": {
+ "smithy.api#documentation": "Specifies a Glue Data Catalog Connection to a Snowflake endpoint.
"
+ }
+ },
+ "Schema": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "Specifies a Snowflake database schema for your node to use.
"
+ }
+ },
+ "Table": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "Specifies a Snowflake table for your node to use.
"
+ }
+ },
+ "Database": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "Specifies a Snowflake database for your node to use.
"
+ }
+ },
+ "TempDir": {
+ "target": "com.amazonaws.glue#EnclosedInStringProperty",
+ "traits": {
+ "smithy.api#documentation": "Not currently used.
"
+ }
+ },
+ "IamRole": {
+ "target": "com.amazonaws.glue#Option",
+ "traits": {
+ "smithy.api#documentation": "Not currently used.
"
+ }
+ },
+ "AdditionalOptions": {
+ "target": "com.amazonaws.glue#AdditionalOptions",
+ "traits": {
+ "smithy.api#documentation": "Specifies additional options passed to the Snowflake connector. If options are specified\n elsewhere in this node, this will take precedence.
"
+ }
+ },
+ "SampleQuery": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "A SQL string used to retrieve data with the query
sourcetype.
"
+ }
+ },
+ "PreAction": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "A SQL string run before the Snowflake connector performs its standard actions.
"
+ }
+ },
+ "PostAction": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "A SQL string run after the Snowflake connector performs its standard actions.
"
+ }
+ },
+ "Action": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "Specifies what action to take when writing to a table with preexisting data. Valid values: \n append
, merge
, truncate
, drop
.
"
+ }
+ },
+ "Upsert": {
+ "target": "com.amazonaws.glue#BooleanValue",
+ "traits": {
+ "smithy.api#default": false,
+ "smithy.api#documentation": "Used when Action is append
. Specifies the resolution behavior when a row\n already exists. If true, preexisting rows will be updated. If false, those rows will be inserted.
"
+ }
+ },
+ "MergeAction": {
+ "target": "com.amazonaws.glue#GenericLimitedString",
+ "traits": {
+ "smithy.api#documentation": "Specifies a merge action. Valid values: simple
, custom
. If\n simple, merge behavior is defined by MergeWhenMatched
and \n MergeWhenNotMatched
. If custom, defined by MergeClause
.
"
+ }
+ },
+ "MergeWhenMatched": {
+ "target": "com.amazonaws.glue#GenericLimitedString",
+ "traits": {
+ "smithy.api#documentation": "Specifies how to resolve records that match preexisting data when merging. Valid values: \n update
, delete
.
"
+ }
+ },
+ "MergeWhenNotMatched": {
+ "target": "com.amazonaws.glue#GenericLimitedString",
+ "traits": {
+ "smithy.api#documentation": "Specifies how to process records that do not match preexisting data when merging. Valid\n values: insert
, none
.
"
+ }
+ },
+ "MergeClause": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "A SQL statement that specifies a custom merge behavior.
"
+ }
+ },
+ "StagingTable": {
+ "target": "com.amazonaws.glue#GenericString",
+ "traits": {
+ "smithy.api#documentation": "The name of a staging table used when performing merge
or upsert append
\n actions. Data is written to this table, then moved to table
by a generated\n postaction.
"
+ }
+ },
+ "SelectedColumns": {
+ "target": "com.amazonaws.glue#OptionList",
+ "traits": {
+ "smithy.api#documentation": "Specifies the columns combined to identify a record when detecting matches for merges and\n upserts. A list of structures with value
, label
and \n description
keys. Each structure describes a column.
"
+ }
+ },
+ "AutoPushdown": {
+ "target": "com.amazonaws.glue#BooleanValue",
+ "traits": {
+ "smithy.api#default": false,
+ "smithy.api#documentation": "Specifies whether automatic query pushdown is enabled. If pushdown\n is enabled, then when a query is run on Spark, if part of the query can be \"pushed down\" to\n the\n Snowflake server, it is pushed down. This improves performance of some queries.
"
+ }
+ },
+ "TableSchema": {
+ "target": "com.amazonaws.glue#OptionList",
+ "traits": {
+ "smithy.api#documentation": "Manually defines the target schema for the node. A list of structures with value
\n , label
and description
keys. Each structure defines a column.
"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Specifies configuration for Snowflake nodes in Glue Studio.
"
+ }
+ },
+ "com.amazonaws.glue#SnowflakeSource": {
+ "type": "structure",
+ "members": {
+ "Name": {
+ "target": "com.amazonaws.glue#NodeName",
+ "traits": {
+ "smithy.api#documentation": "The name of the Snowflake data source.
",
+ "smithy.api#required": {}
+ }
+ },
+ "Data": {
+ "target": "com.amazonaws.glue#SnowflakeNodeData",
+ "traits": {
+ "smithy.api#documentation": "Configuration for the Snowflake data source.
",
+ "smithy.api#required": {}
+ }
+ },
+ "OutputSchemas": {
+ "target": "com.amazonaws.glue#GlueSchemas",
+ "traits": {
+ "smithy.api#documentation": "Specifies user-defined schemas for your output data.
"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Specifies a Snowflake data source.
"
+ }
+ },
+ "com.amazonaws.glue#SnowflakeTarget": {
+ "type": "structure",
+ "members": {
+ "Name": {
+ "target": "com.amazonaws.glue#NodeName",
+ "traits": {
+ "smithy.api#documentation": "The name of the Snowflake target.
",
+ "smithy.api#required": {}
+ }
+ },
+ "Data": {
+ "target": "com.amazonaws.glue#SnowflakeNodeData",
+ "traits": {
+ "smithy.api#documentation": "Specifies the data of the Snowflake target node.
",
+ "smithy.api#required": {}
+ }
+ },
+ "Inputs": {
+ "target": "com.amazonaws.glue#OneInput",
+ "traits": {
+ "smithy.api#documentation": "The nodes that are inputs to the data target.
"
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "Specifies a Snowflake target.
"
+ }
+ },
"com.amazonaws.glue#Sort": {
"type": "enum",
"members": {