diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 902ef3826..33d7fd814 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -1234,7 +1234,7 @@ "type": "array" }, "Workspace": { - "markdownDescription": "An Amazon Managed Service for Prometheus workspace is a logical and isolated Prometheus server dedicated to ingesting, storing, and querying your Prometheus-compatible metrics.", + "markdownDescription": "The ID of the workspace to add the rule groups namespace to.", "title": "Workspace", "type": "string" } @@ -1267,6 +1267,185 @@ ], "type": "object" }, + "AWS::APS::Scraper": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Alias": { + "markdownDescription": "An optional user-assigned scraper alias.", + "title": "Alias", + "type": "string" + }, + "Destination": { + "$ref": "#/definitions/AWS::APS::Scraper.Destination", + "markdownDescription": "The Amazon Managed Service for Prometheus workspace the scraper sends metrics to.", + "title": "Destination" + }, + "ScrapeConfiguration": { + "$ref": "#/definitions/AWS::APS::Scraper.ScrapeConfiguration", + "markdownDescription": "The configuration in use by the scraper.", + "title": "ScrapeConfiguration" + }, + "Source": { + "$ref": "#/definitions/AWS::APS::Scraper.Source", + "markdownDescription": "The Amazon EKS cluster from which the scraper collects metrics.", + "title": "Source" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "(Optional) The list of tag keys and values associated with the scraper.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Destination", + "ScrapeConfiguration", + "Source" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::APS::Scraper" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::APS::Scraper.AmpConfiguration": { + "additionalProperties": false, + "properties": { + "WorkspaceArn": { + "markdownDescription": "ARN of the Amazon Managed Service for Prometheus workspace.", + "title": "WorkspaceArn", + "type": "string" + } + }, + "required": [ + "WorkspaceArn" + ], + "type": "object" + }, + "AWS::APS::Scraper.Destination": { + "additionalProperties": false, + "properties": { + "AmpConfiguration": { + "$ref": "#/definitions/AWS::APS::Scraper.AmpConfiguration", + "markdownDescription": "The Amazon Managed Service for Prometheus workspace to send metrics to.", + "title": "AmpConfiguration" + } + }, + "required": [ + "AmpConfiguration" + ], + "type": "object" + }, + "AWS::APS::Scraper.EksConfiguration": { + "additionalProperties": false, + "properties": { + "ClusterArn": { + "markdownDescription": "ARN of the Amazon EKS cluster.", + "title": "ClusterArn", + "type": "string" + }, + "SecurityGroupIds": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of the security group IDs for the Amazon EKS cluster VPC configuration.", + "title": "SecurityGroupIds", + "type": 
"array" + }, + "SubnetIds": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of subnet IDs for the Amazon EKS cluster VPC configuration.", + "title": "SubnetIds", + "type": "array" + } + }, + "required": [ + "ClusterArn", + "SubnetIds" + ], + "type": "object" + }, + "AWS::APS::Scraper.ScrapeConfiguration": { + "additionalProperties": false, + "properties": { + "ConfigurationBlob": { + "markdownDescription": "The base 64 encoded scrape configuration file.", + "title": "ConfigurationBlob", + "type": "string" + } + }, + "required": [ + "ConfigurationBlob" + ], + "type": "object" + }, + "AWS::APS::Scraper.Source": { + "additionalProperties": false, + "properties": { + "EksConfiguration": { + "$ref": "#/definitions/AWS::APS::Scraper.EksConfiguration", + "markdownDescription": "The Amazon EKS cluster from which a scraper collects metrics.", + "title": "EksConfiguration" + } + }, + "required": [ + "EksConfiguration" + ], + "type": "object" + }, "AWS::APS::Workspace": { "additionalProperties": false, "properties": { @@ -24936,7 +25115,7 @@ "type": "string" }, "Version": { - "markdownDescription": "Returns the version to use for the specified X12 transaction set. Supported versions are `4010` , `4030` , and `5010` .", + "markdownDescription": "Returns the version to use for the specified X12 transaction set.", "title": "Version", "type": "string" } @@ -25268,13 +25447,261 @@ "type": "string" }, "Version": { - "markdownDescription": "Returns the version to use for the specified X12 transaction set. Supported versions are `4010` , `4030` , and `5010` .", + "markdownDescription": "Returns the version to use for the specified X12 transaction set.", "title": "Version", "type": "string" } }, "type": "object" }, + "AWS::BCMDataExports::Export": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Export": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.Export", + "markdownDescription": "The details that are available for an export.", + "title": "Export" + }, + "Tags": { + "items": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.ResourceTag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Export" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::BCMDataExports::Export" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.DataQuery": { + "additionalProperties": false, + "properties": { + "QueryStatement": { + "markdownDescription": "The query statement.", + "title": "QueryStatement", + "type": "string" + }, + "TableConfigurations": { + "markdownDescription": "The table configuration.", + "title": "TableConfigurations", + "type": "object" + } + }, + "required": [ + "QueryStatement" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.DestinationConfigurations": { + "additionalProperties": false, + "properties": { + "S3Destination": { + "$ref": 
"#/definitions/AWS::BCMDataExports::Export.S3Destination", + "markdownDescription": "An object that describes the destination of the data exports file.", + "title": "S3Destination" + } + }, + "required": [ + "S3Destination" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.Export": { + "additionalProperties": false, + "properties": { + "DataQuery": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.DataQuery", + "markdownDescription": "The data query for this specific data export.", + "title": "DataQuery" + }, + "Description": { + "markdownDescription": "The description for this specific data export.", + "title": "Description", + "type": "string" + }, + "DestinationConfigurations": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.DestinationConfigurations", + "markdownDescription": "The destination configuration for this specific data export.", + "title": "DestinationConfigurations" + }, + "ExportArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for this export.", + "title": "ExportArn", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of this specific data export.", + "title": "Name", + "type": "string" + }, + "RefreshCadence": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.RefreshCadence", + "markdownDescription": "The cadence for AWS to update the export in your S3 bucket.", + "title": "RefreshCadence" + } + }, + "required": [ + "DataQuery", + "DestinationConfigurations", + "Name", + "RefreshCadence" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.RefreshCadence": { + "additionalProperties": false, + "properties": { + "Frequency": { + "markdownDescription": "The frequency that data exports are updated. The export refreshes each time the source data updates, up to three times daily.", + "title": "Frequency", + "type": "string" + } + }, + "required": [ + "Frequency" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.ResourceTag": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key that's associated with the tag.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value that's associated with the tag.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.S3Destination": { + "additionalProperties": false, + "properties": { + "S3Bucket": { + "markdownDescription": "The name of the Amazon S3 bucket used as the destination of a data export file.", + "title": "S3Bucket", + "type": "string" + }, + "S3OutputConfigurations": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.S3OutputConfigurations", + "markdownDescription": "The output configuration for the data export.", + "title": "S3OutputConfigurations" + }, + "S3Prefix": { + "markdownDescription": "The S3 path prefix you want prepended to the name of your data export.", + "title": "S3Prefix", + "type": "string" + }, + "S3Region": { + "markdownDescription": "The S3 bucket Region.", + "title": "S3Region", + "type": "string" + } + }, + "required": [ + "S3Bucket", + "S3OutputConfigurations", + "S3Prefix", + "S3Region" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.S3OutputConfigurations": { + "additionalProperties": false, + "properties": { + "Compression": { + "markdownDescription": "The compression type for the data export.", + "title": "Compression", + "type": "string" + }, + "Format": { + "markdownDescription": "The file format for the data export.", + "title": "Format", + 
"type": "string" + }, + "OutputType": { + "markdownDescription": "The output type for the data export.", + "title": "OutputType", + "type": "string" + }, + "Overwrite": { + "markdownDescription": "The rule to follow when generating a version of the data export file. You have the choice to overwrite the previous version or to be delivered in addition to the previous versions. Overwriting exports can save on Amazon S3 storage costs. Creating new export versions allows you to track the changes in cost and usage data over time.", + "title": "Overwrite", + "type": "string" + } + }, + "required": [ + "Compression", + "Format", + "OutputType", + "Overwrite" + ], + "type": "object" + }, "AWS::Backup::BackupPlan": { "additionalProperties": false, "properties": { @@ -26314,7 +26741,7 @@ "items": { "type": "string" }, - "markdownDescription": "These are the types of recovery points.", + "markdownDescription": "These are the types of recovery points.\n\nInclude `SNAPSHOT` to restore only snapshot recovery points; include `CONTINUOUS` to restore continuous recovery points (point in time restore / PITR); use both to restore either a snapshot or a continuous recovery point. The recovery point will be determined by the value for `Algorithm` .", "title": "RecoveryPointTypes", "type": "array" }, @@ -31649,6 +32076,11 @@ "markdownDescription": "The IDs of the AWS accounts that are allowed to query by the custom analysis rule. Required when `allowedAnalyses` is `ANY_QUERY` .", "title": "AllowedAnalysisProviders", "type": "array" + }, + "DifferentialPrivacy": { + "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.DifferentialPrivacy", + "markdownDescription": "The differential privacy configuration.", + "title": "DifferentialPrivacy" } }, "required": [ @@ -31725,6 +32157,37 @@ }, "type": "object" }, + "AWS::CleanRooms::ConfiguredTable.DifferentialPrivacy": { + "additionalProperties": false, + "properties": { + "Columns": { + "items": { + "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.DifferentialPrivacyColumn" + }, + "markdownDescription": "", + "title": "Columns", + "type": "array" + } + }, + "required": [ + "Columns" + ], + "type": "object" + }, + "AWS::CleanRooms::ConfiguredTable.DifferentialPrivacyColumn": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the column, such as user_id, that contains the unique identifier of your users, whose privacy you want to protect. If you want to turn on differential privacy for two or more tables in a collaboration, you must configure the same column as the user identifier column in both analysis rules.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, "AWS::CleanRooms::ConfiguredTable.GlueTableReference": { "additionalProperties": false, "properties": { @@ -38154,6 +38617,8 @@ "additionalProperties": false, "properties": { "AccountId": { + "markdownDescription": "If the CloudWatch metric that provides the time series that the anomaly detector uses as input is in another account, specify that account ID here. If you omit this parameter, the current account is used.", + "title": "AccountId", "type": "string" }, "Dimensions": { @@ -39635,7 +40100,7 @@ "type": "string" }, "SourceVersion": { - "markdownDescription": "The source version for the corresponding source identifier. 
If specified, must be one of:\n\n- For CodeCommit: the commit ID, branch, or Git tag to use.\n- For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format `pr/pull-request-ID` (for example, `pr/25` ). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Amazon S3: the version ID of the object that represents the build input ZIP file to use.\n\nFor more information, see [Source Version Sample with CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) in the *AWS CodeBuild User Guide* .", + "markdownDescription": "The source version for the corresponding source identifier. If specified, must be one of:\n\n- For CodeCommit: the commit ID, branch, or Git tag to use.\n- For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format `pr/pull-request-ID` (for example, `pr/25` ). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Amazon S3: the version ID of the object that represents the build input ZIP file to use.\n\nFor more information, see [Source Version Sample with CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) in the *AWS CodeBuild User Guide* .", "title": "SourceVersion", "type": "string" } @@ -39747,7 +40212,7 @@ "type": "boolean" }, "Location": { - "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", + "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in a CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in a GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeStar Connections *Create GitLab connection* page, choose *Connect to GitLab* . 
(After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", "title": "Location", "type": "string" }, @@ -39762,7 +40227,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub or GitHub Enterprise Cloud repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket.", + "markdownDescription": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `GITLAB` : The source code is in a GitLab repository.\n- `GITLAB_SELF_MANAGED` : The source code is in a self-managed GitLab repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket.", "title": "Type", "type": "string" } @@ -40028,12 +40493,12 @@ "additionalProperties": false, "properties": { "AuthType": { - "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or PERSONAL_ACCESS_TOKEN.", + "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", "title": "AuthType", "type": "string" }, "ServerType": { - "markdownDescription": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.", + "markdownDescription": "The type of source provider. 
The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET.", "title": "ServerType", "type": "string" }, @@ -42717,6 +43182,11 @@ "title": "ConfigFile", "type": "string" }, + "PublishDeploymentStatus": { + "markdownDescription": "Whether to enable or disable publishing of deployment status to source providers.", + "title": "PublishDeploymentStatus", + "type": "string" + }, "RepositoryLinkId": { "markdownDescription": "The ID of the repository link associated with a specific sync configuration.", "title": "RepositoryLinkId", @@ -42736,6 +43206,11 @@ "markdownDescription": "The type of sync for a specific sync configuration.", "title": "SyncType", "type": "string" + }, + "TriggerResourceUpdateOn": { + "markdownDescription": "When to trigger Git sync to begin the stack update.", + "title": "TriggerResourceUpdateOn", + "type": "string" } }, "required": [ @@ -55241,7 +55716,7 @@ "type": "string" }, "CaptureDdls": { - "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", + "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nThe default value is `true` .\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", "title": "CaptureDdls", "type": "boolean" }, @@ -55251,7 +55726,7 @@ "type": "string" }, "DdlArtifactsSchema": { - "markdownDescription": "The schema in which the operational DDL database artifacts are created.\n\nExample: `ddlArtifactsSchema=xyzddlschema;`", + "markdownDescription": "The schema in which the operational DDL database artifacts are created.\n\nThe default value is `public` .\n\nExample: `ddlArtifactsSchema=xyzddlschema;`", "title": "DdlArtifactsSchema", "type": "string" }, @@ -55261,37 +55736,37 @@ "type": "number" }, "FailTasksOnLobTruncation": { - "markdownDescription": "When set to `true` , this value causes a task to fail if the actual size of a LOB column is greater than the specified `LobMaxSize` .\n\nIf task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.", + "markdownDescription": "When set to `true` , this value causes a task to fail if the actual size of a LOB column is greater than the specified `LobMaxSize` .\n\nThe default value is `false` .\n\nIf a task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.", "title": "FailTasksOnLobTruncation", "type": "boolean" }, "HeartbeatEnable": { - "markdownDescription": "The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps `restart_lsn` moving and prevents storage full scenarios.", + "markdownDescription": "The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. 
This heartbeat keeps `restart_lsn` moving and prevents storage full scenarios.\n\nThe default value is `false` .", "title": "HeartbeatEnable", "type": "boolean" }, "HeartbeatFrequency": { - "markdownDescription": "Sets the WAL heartbeat frequency (in minutes).", + "markdownDescription": "Sets the WAL heartbeat frequency (in minutes).\n\nThe default value is 5 minutes.", "title": "HeartbeatFrequency", "type": "number" }, "HeartbeatSchema": { - "markdownDescription": "Sets the schema in which the heartbeat artifacts are created.", + "markdownDescription": "Sets the schema in which the heartbeat artifacts are created.\n\nThe default value is `public` .", "title": "HeartbeatSchema", "type": "string" }, "MapBooleanAsBoolean": { - "markdownDescription": "When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as `varchar(5)` . You must set this setting on both the source and target endpoints for it to take effect.", + "markdownDescription": "When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as `varchar(5)` . You must set this setting on both the source and target endpoints for it to take effect.\n\nThe default value is `false` .", "title": "MapBooleanAsBoolean", "type": "boolean" }, "MaxFileSize": { - "markdownDescription": "Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.\n\nExample: `maxFileSize=512`", + "markdownDescription": "Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.\n\nThe default value is 32,768 KB (32 MB).\n\nExample: `maxFileSize=512`", "title": "MaxFileSize", "type": "number" }, "PluginName": { - "markdownDescription": "Specifies the plugin to use to create a replication slot.", + "markdownDescription": "Specifies the plugin to use to create a replication slot.\n\nThe default value is `pglogical` .", "title": "PluginName", "type": "string" }, @@ -61048,21 +61523,6 @@ }, "type": "object" }, - "AWS::DataSync::Task.S3": { - "additionalProperties": false, - "properties": { - "BucketAccessRoleArn": { - "type": "string" - }, - "S3BucketArn": { - "type": "string" - }, - "Subdirectory": { - "type": "string" - } - }, - "type": "object" - }, "AWS::DataSync::Task.Skipped": { "additionalProperties": false, "properties": { @@ -63653,6 +64113,11 @@ "title": "AuthType", "type": "string" }, + "BackupRetentionPeriod": { + "markdownDescription": "The number of days for which automatic snapshots are retained.", + "title": "BackupRetentionPeriod", + "type": "number" + }, "ClusterName": { "markdownDescription": "The name of the new elastic cluster. 
This parameter is stored as a lowercase string.\n\n*Constraints* :\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- The first character must be a letter.\n- Cannot end with a hyphen or contain two consecutive hyphens.\n\n*Example* : `my-cluster`", "title": "ClusterName", @@ -63663,6 +64128,11 @@ "title": "KmsKeyId", "type": "string" }, + "PreferredBackupWindow": { + "markdownDescription": "The daily time range during which automated backups are created if automated backups are enabled, as determined by `backupRetentionPeriod` .", + "title": "PreferredBackupWindow", + "type": "string" + }, "PreferredMaintenanceWindow": { "markdownDescription": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\n*Format* : `ddd:hh24:mi-ddd:hh24:mi`\n\n*Default* : a 30-minute window selected at random from an 8-hour block of time for each AWS Region , occurring on a random day of the week.\n\n*Valid days* : Mon, Tue, Wed, Thu, Fri, Sat, Sun\n\n*Constraints* : Minimum 30-minute window.", "title": "PreferredMaintenanceWindow", @@ -63678,6 +64148,11 @@ "title": "ShardCount", "type": "number" }, + "ShardInstanceCount": { + "markdownDescription": "The number of replica instances applying to all shards in the cluster. A `shardInstanceCount` value of 1 means there is one writer instance, and any additional instances are replicas that can be used for reads and to improve availability.", + "title": "ShardInstanceCount", + "type": "number" + }, "SubnetIds": { "items": { "type": "string" @@ -68311,7 +68786,7 @@ "type": "array" }, "SubnetId": { - "markdownDescription": "The ID of the subnet to launch the instance into.\n\nIf you specify a network interface, you must specify any subnets as part of the network interface.", + "markdownDescription": "The ID of the subnet to launch the instance into.\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "title": "SubnetId", "type": "string" }, @@ -68664,7 +69139,7 @@ "type": "number" }, "SubnetId": { - "markdownDescription": "The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance.", + "markdownDescription": "The ID of the subnet associated with the network interface.", "title": "SubnetId", "type": "string" } @@ -69705,7 +70180,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.NetworkInterface" }, - "markdownDescription": "One or more network interfaces. If you specify a network interface, you must specify any security groups and subnets as part of the network interface.", + "markdownDescription": "The network interfaces for the instance.", "title": "NetworkInterfaces", "type": "array" }, @@ -69728,7 +70203,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups. You can specify the IDs of existing security groups and references to resources created by the stack template.", + "markdownDescription": "The IDs of the security groups. You can specify the IDs of existing security groups and references to resources created by the stack template.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead.", "title": "SecurityGroupIds", "type": "array" }, @@ -69736,7 +70211,7 @@ "items": { "type": "string" }, - "markdownDescription": "One or more security group names. 
For a nondefault VPC, you must use security group IDs instead.", + "markdownDescription": "The names of the security groups. For a nondefault VPC, you must use security group IDs instead.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter.", "title": "SecurityGroups", "type": "array" }, @@ -69809,11 +70284,6 @@ "markdownDescription": "Disables the automatic recovery behavior of your instance or sets it to default.", "title": "AutoRecovery", "type": "string" - }, - "RebootMigration": { - "markdownDescription": "", - "title": "RebootMigration", - "type": "string" } }, "type": "object" @@ -73911,7 +74381,7 @@ "type": "number" }, "SubnetId": { - "markdownDescription": "The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance.", + "markdownDescription": "The ID of the subnet associated with the network interface.", "title": "SubnetId", "type": "string" } @@ -74298,7 +74768,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::SpotFleet.InstanceNetworkInterfaceSpecification" }, - "markdownDescription": "One or more network interfaces. If you specify a network interface, you must specify subnet IDs and security group IDs using the network interface.\n\n> `SpotFleetLaunchSpecification` currently does not support Elastic Fabric Adapter (EFA). To specify an EFA, you must use [LaunchTemplateConfig](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html) .", + "markdownDescription": "The network interfaces.", "title": "NetworkInterfaces", "type": "array" }, @@ -74316,7 +74786,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::SpotFleet.GroupIdentifier" }, - "markdownDescription": "The security groups.", + "markdownDescription": "The security groups.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter.", "title": "SecurityGroups", "type": "array" }, @@ -74326,7 +74796,7 @@ "type": "string" }, "SubnetId": { - "markdownDescription": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".", + "markdownDescription": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "title": "SubnetId", "type": "string" }, @@ -74673,6 +75143,11 @@ "title": "EnableDns64", "type": "boolean" }, + "EnableLniAtDeviceIndex": { + "markdownDescription": "Indicates the device position for local network interfaces in this subnet. 
For example, `1` indicates local network interfaces in this subnet are the secondary network interface (eth1).", + "title": "EnableLniAtDeviceIndex", + "type": "number" + }, "Ipv4IpamPoolId": { "markdownDescription": "An IPv4 IPAM pool ID for the subnet.", "title": "Ipv4IpamPoolId", @@ -80141,7 +80616,7 @@ "type": "number" }, "MinimumHealthyPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. 
For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", "title": "MinimumHealthyPercent", "type": "number" } @@ -91371,7 +91846,6 @@ "required": [ "IdMappingTechniques", "InputSourceConfig", - "OutputSourceConfig", "RoleArn", "WorkflowName" ], @@ -91426,11 +91900,15 @@ "markdownDescription": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` .", "title": "SchemaArn", "type": "string" + }, + "Type": { + "markdownDescription": "", + "title": "Type", + "type": "string" } }, "required": [ - "InputSourceARN", - "SchemaArn" + "InputSourceARN" ], "type": "object" }, @@ -91737,7 +92215,7 @@ "title": "ProviderProperties" }, "ResolutionType": { - "markdownDescription": "The type of matching. There are two types of matching: `RULE_MATCHING` and `ML_MATCHING` .", + "markdownDescription": "The type of matching. 
There are three types of matching: `RULE_MATCHING` , `ML_MATCHING` , and `PROVIDER` .", "title": "ResolutionType", "type": "string" }, @@ -100418,7 +100896,7 @@ "additionalProperties": false, "properties": { "AttachmentArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints to accelerators.", + "markdownDescription": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints.", "title": "AttachmentArn", "type": "string" }, @@ -115103,7 +115581,7 @@ "title": "HealthEventsConfig" }, "IncludeLinkedAccounts": { - "markdownDescription": "", + "markdownDescription": "A boolean option that you can set to `TRUE` to include monitors for linked accounts in a list of monitors, when you've set up cross-account sharing in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.", "title": "IncludeLinkedAccounts", "type": "boolean" }, @@ -115113,7 +115591,7 @@ "title": "InternetMeasurementsLogDelivery" }, "LinkedAccountId": { - "markdownDescription": "", + "markdownDescription": "The account ID for an account that you've set up cross-account sharing for in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.", "title": "LinkedAccountId", "type": "string" }, @@ -124949,12 +125427,12 @@ "type": "array" }, "Description": { - "markdownDescription": "The description of the composite model.", + "markdownDescription": "The description of the composite model.\n\n> If the composite model is a `component-model-based` composite model, the description is inherited from the `COMPONENT_MODEL` asset model and cannot be changed.", "title": "Description", "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `Path` must be specified.", "title": "ExternalId", "type": "string" }, @@ -124969,7 +125447,7 @@ "type": "string" }, "ParentAssetModelCompositeModelExternalId": { - "markdownDescription": "The external ID of the parent asset model. 
For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> If `ParentCompositeModelExternalId` is specified, this value overrides the value of `ExternalId` , if both are included.", + "markdownDescription": "The external ID of the parent composite model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "title": "ParentAssetModelCompositeModelExternalId", "type": "string" }, @@ -124977,7 +125455,7 @@ "items": { "type": "string" }, - "markdownDescription": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.", + "markdownDescription": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.\n\n> One of `ExternalId` or `Path` must be specified.", "title": "Path", "type": "array" }, @@ -125002,7 +125480,7 @@ "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "ExternalId", "type": "string" }, @@ -125012,7 +125490,7 @@ "type": "string" }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", + "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "LogicalId", "type": "string" }, @@ -125042,7 +125520,7 @@ "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The external ID of the asset property. 
For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "ExternalId", "type": "string" }, @@ -125052,7 +125530,7 @@ "type": "string" }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model property.", + "markdownDescription": "The `LogicalID` of the asset model property.\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "LogicalId", "type": "string" }, @@ -130237,7 +130715,7 @@ "type": "number" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager . The credentials should be a user/password pair. For more information, see [Using a Database Data Source](https://docs.aws.amazon.com/kendra/latest/dg/data-source-database.html) . For more information about AWS Secrets Manager , see [What Is AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) in the *AWS Secrets Manager* user guide.", + "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that stores the credentials. The credentials should be a user-password pair. For more information, see [Using a Database Data Source](https://docs.aws.amazon.com/kendra/latest/dg/data-source-database.html) . For more information about AWS Secrets Manager , see [What Is AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) in the *AWS Secrets Manager* user guide.", "title": "SecretArn", "type": "string" }, @@ -130278,7 +130756,7 @@ "title": "PreExtractionHookConfiguration" }, "RoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of a role with permission to run `PreExtractionHookConfiguration` and `PostExtractionHookConfiguration` for altering document metadata and content during the document ingestion process. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", + "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role with permission to run `PreExtractionHookConfiguration` and `PostExtractionHookConfiguration` for altering document metadata and content during the document ingestion process. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", "title": "RoleArn", "type": "string" } @@ -130593,7 +131071,7 @@ "title": "InvocationCondition" }, "LambdaArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of a role with permission to run a Lambda function during ingestion. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", + "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role with permission to run a Lambda function during ingestion. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", "title": "LambdaArn", "type": "string" }, @@ -130692,7 +131170,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of users whose documents should be indexed. Specify the user names in email format, for example, `username@tenantdomain` . 
If you need to index the documents of more than 100 users, use the `OneDriveUserS3Path` field to specify the location of a file containing a list of users.", + "markdownDescription": "A list of users whose documents should be indexed. Specify the user names in email format, for example, `username@tenantdomain` . If you need to index the documents of more than 10 users, use the `OneDriveUserS3Path` field to specify the location of a file containing a list of users.", "title": "OneDriveUserList", "type": "array" }, @@ -130708,7 +131186,7 @@ "additionalProperties": false, "properties": { "Credentials": { - "markdownDescription": "Your secret ARN, which you can create in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nThe credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently support basic authentication to connect to a web proxy server. The secret stores your credentials.", + "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret. You create a secret to store your credentials in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nThe credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently supports basic authentication to connect to a web proxy server. The secret stores your credentials.", "title": "Credentials", "type": "string" }, @@ -131292,7 +131770,7 @@ "additionalProperties": false, "properties": { "Credentials": { - "markdownDescription": "Your secret ARN, which you can create in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nYou use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.", + "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret. You create a secret to store your credentials in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nYou use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.", "title": "Credentials", "type": "string" }, @@ -149642,7 +150120,7 @@ "type": "string" }, "NetworkId": { - "markdownDescription": "The unique identifier of the network for the node.\n\nEthereum public networks have the following `NetworkId` s:\n\n- `n-ethereum-mainnet`\n- `n-ethereum-goerli`", + "markdownDescription": "The unique identifier of the network for the node.\n\nEthereum public networks have the following `NetworkId` s:\n\n- `n-ethereum-mainnet`", "title": "NetworkId", "type": "string" }, @@ -165961,7 +166439,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of strings that define which types of data that the source account shares with the monitoring account. Valid values are `AWS::CloudWatch::Metric | AWS::Logs::LogGroup | AWS::XRay::Trace | AWS::ApplicationInsights::Application` .", + "markdownDescription": "An array of strings that define which types of data the source account shares with the monitoring account. 
Valid values are `AWS::CloudWatch::Metric | AWS::Logs::LogGroup | AWS::XRay::Trace | AWS::ApplicationInsights::Application | AWS::InternetMonitor::Monitor` .", "title": "ResourceTypes", "type": "array" }, @@ -176931,7 +177409,7 @@ }, "SelfManagedKafkaParameters": { "$ref": "#/definitions/AWS::Pipes::Pipe.PipeSourceSelfManagedKafkaParameters", - "markdownDescription": "The parameters for using a stream as a source.", + "markdownDescription": "The parameters for using a stream as a source.\n\nA *self managed* cluster refers to any Apache Kafka cluster not hosted by AWS . This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as [Confluent Cloud](https://docs.aws.amazon.com/https://www.confluent.io/) , [CloudKarafka](https://docs.aws.amazon.com/https://www.cloudkarafka.com/) , or [Redpanda](https://docs.aws.amazon.com/https://redpanda.com/) . For more information, see [Apache Kafka streams as a source](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kafka.html) in the *Amazon EventBridge User Guide* .", "title": "SelfManagedKafkaParameters" }, "SqsQueueParameters": { @@ -218808,12 +219286,12 @@ }, "ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ScalingConfiguration", - "markdownDescription": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, Use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The scaling configuration of an Aurora Serverless v1 DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora Serverless v1 DB clusters only", "title": "ScalingConfiguration" }, "ServerlessV2ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ServerlessV2ScalingConfiguration", - "markdownDescription": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, Use the `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The scaling configuration of an Aurora Serverless v2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use the `ScalingConfiguration` property.\n\nValid for: Aurora Serverless v2 DB clusters only", "title": "ServerlessV2ScalingConfiguration" }, "SnapshotIdentifier": { @@ -220673,6 +221151,16 @@ "title": "AdditionalEncryptionContext", "type": "object" }, + "DataFilter": { + "markdownDescription": "Data filters for the integration. 
These filters determine which tables from the source database are sent to the target Amazon Redshift data warehouse.", + "title": "DataFilter", + "type": "string" + }, + "Description": { + "markdownDescription": "A description of the integration.", + "title": "Description", + "type": "string" + }, "IntegrationName": { "markdownDescription": "The name of the integration.", "title": "IntegrationName", "type": "string" }, @@ -236938,7 +237426,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the contacts to add to the rotation.\n\nThe order in which you list the contacts is their shift order in the rotation schedule.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the contacts to add to the rotation.\n\n> Only the `PERSONAL` contact type is supported. The contact types `ESCALATION` and `ONCALL_SCHEDULE` are not supported for this operation. \n\nThe order in which you list the contacts is their shift order in the rotation schedule.", "title": "ContactIds", "type": "array" }, @@ -236966,7 +237454,7 @@ "type": "array" }, "TimeZoneId": { - "markdownDescription": "The time zone to base the rotation\u2019s activity on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"UTC\", or \"Asia/Seoul\". For more information, see the [Time Zone Database](https://docs.aws.amazon.com/https://www.iana.org/time-zones) on the IANA website.", + "markdownDescription": "The time zone to base the rotation\u2019s activity on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"UTC\", or \"Asia/Seoul\". For more information, see the [Time Zone Database](https://docs.aws.amazon.com/https://www.iana.org/time-zones) on the IANA website.\n\n> Designators for time zones that don\u2019t support Daylight Saving Time rules, such as Pacific Standard Time (PST), are not supported.", "title": "TimeZoneId", "type": "string" } @@ -248884,7 +249372,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", "title": "CreatedAt", "type": "array" }, @@ -248908,7 +249396,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "FirstObservedAt", "type": "array" }, @@ -248932,7 +249420,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "LastObservedAt", "type": "array" }, @@ -248948,7 +249436,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "The timestamp of when the note was updated. Uses the date-time format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) . 
The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "The timestamp of when the note was updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "NoteUpdatedAt", "type": "array" }, @@ -249084,7 +249572,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "UpdatedAt", "type": "array" }, @@ -249124,12 +249612,12 @@ "title": "DateRange" }, "End": { - "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nA correctly formatted example is `2020-05-21T20:16:34.724Z` . The value cannot contain spaces, and date and time should be separated by `T` . For more information, see [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) .", + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", "title": "End", "type": "string" }, "Start": { - "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nA correctly formatted example is `2020-05-21T20:16:34.724Z` . The value cannot contain spaces, and date and time should be separated by `T` . For more information, see [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) .", + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", "title": "Start", "type": "string" } @@ -249476,6 +249964,142 @@ ], "type": "object" }, + "AWS::SecurityLake::DataLake": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "EncryptionConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.EncryptionConfiguration" + }, + "LifecycleConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.LifecycleConfiguration" + }, + "MetaStoreManagerRoleArn": { + "type": "string" + }, + "ReplicationConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.ReplicationConfiguration" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityLake::DataLake" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::SecurityLake::DataLake.EncryptionConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Expiration": { + "additionalProperties": false, + "properties": { + "Days": { + "type": "number" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.LifecycleConfiguration": { + "additionalProperties": false, + "properties": { + "Expiration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Expiration" 
+ }, + "Transitions": { + "items": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Transitions" + }, + "type": "array" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.ReplicationConfiguration": { + "additionalProperties": false, + "properties": { + "Regions": { + "items": { + "type": "string" + }, + "type": "array" + }, + "RoleArn": { + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Transitions": { + "additionalProperties": false, + "properties": { + "Days": { + "type": "number" + }, + "StorageClass": { + "type": "string" + } + }, + "type": "object" + }, "AWS::ServiceCatalog::AcceptedPortfolioShare": { "additionalProperties": false, "properties": { @@ -273424,6 +274048,9 @@ { "$ref": "#/definitions/AWS::APS::RuleGroupsNamespace" }, + { + "$ref": "#/definitions/AWS::APS::Scraper" + }, { "$ref": "#/definitions/AWS::APS::Workspace" }, @@ -273763,6 +274390,9 @@ { "$ref": "#/definitions/AWS::B2BI::Transformer" }, + { + "$ref": "#/definitions/AWS::BCMDataExports::Export" + }, { "$ref": "#/definitions/AWS::Backup::BackupPlan" }, @@ -276712,6 +277342,9 @@ { "$ref": "#/definitions/AWS::SecurityHub::Standard" }, + { + "$ref": "#/definitions/AWS::SecurityLake::DataLake" + }, { "$ref": "#/definitions/AWS::ServiceCatalog::AcceptedPortfolioShare" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 9d0f99cab..5c019c39f 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -206,12 +206,40 @@ "Data": "The rules file used in the namespace.\n\nFor more details about the rules file, see [Creating a rules file](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-ruler-rulesfile.html) in the *Amazon Managed Service for Prometheus User Guide* .", "Name": "The name of the rule groups namespace.", "Tags": "The list of tag keys and values that are associated with the rule groups namespace.", - "Workspace": "An Amazon Managed Service for Prometheus workspace is a logical and isolated Prometheus server dedicated to ingesting, storing, and querying your Prometheus-compatible metrics." + "Workspace": "The ID of the workspace to add the rule groups namespace." }, "AWS::APS::RuleGroupsNamespace Tag": { "Key": "The key of the tag. Must not begin with `aws:` .", "Value": "The value of the tag." }, + "AWS::APS::Scraper": { + "Alias": "An optional user-assigned scraper alias.", + "Destination": "The Amazon Managed Service for Prometheus workspace the scraper sends metrics to.", + "ScrapeConfiguration": "The configuration in use by the scraper.", + "Source": "The Amazon EKS cluster from which the scraper collects metrics.", + "Tags": "(Optional) The list of tag keys and values associated with the scraper." + }, + "AWS::APS::Scraper AmpConfiguration": { + "WorkspaceArn": "ARN of the Amazon Managed Service for Prometheus workspace." + }, + "AWS::APS::Scraper Destination": { + "AmpConfiguration": "The Amazon Managed Service for Prometheus workspace to send metrics to." + }, + "AWS::APS::Scraper EksConfiguration": { + "ClusterArn": "ARN of the Amazon EKS cluster.", + "SecurityGroupIds": "A list of the security group IDs for the Amazon EKS cluster VPC configuration.", + "SubnetIds": "A list of subnet IDs for the Amazon EKS cluster VPC configuration." + }, + "AWS::APS::Scraper ScrapeConfiguration": { + "ConfigurationBlob": "The base 64 encoded scrape configuration file." 
+ }, + "AWS::APS::Scraper Source": { + "EksConfiguration": "The Amazon EKS cluster from which a scraper collects metrics." + }, + "AWS::APS::Scraper Tag": { + "Key": "The key of the tag. Must not begin with `aws:` .", + "Value": "The value of the tag." + }, "AWS::APS::Workspace": { "AlertManagerDefinition": "The alert manager definition, a YAML configuration for the alert manager in your Amazon Managed Service for Prometheus workspace.\n\nFor details about the alert manager definition, see [Creating an alert manager configuration files](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alertmanager-config.html) in the *Amazon Managed Service for Prometheus User Guide* .\n\nThe following example shows part of a CloudFormation YAML file with an embedded alert manager definition (following the `- |-` ).\n\n`Workspace: Type: AWS::APS::Workspace .... Properties: .... AlertManagerDefinition: Fn::Sub: - |- alertmanager_config: | templates: - 'default_template' route: receiver: example-sns receivers: - name: example-sns sns_configs: - topic_arn: 'arn:aws:sns:${AWS::Region}:${AWS::AccountId}:${TopicName}' -`", "Alias": "The alias that is assigned to this workspace to help identify it. It does not need to be unique.", @@ -3968,7 +3996,7 @@ }, "AWS::B2BI::Capability X12Details": { "TransactionSet": "Returns an enumerated type where each value identifies an X12 transaction set. Transaction sets are maintained by the X12 Accredited Standards Committee.", - "Version": "Returns the version to use for the specified X12 transaction set. Supported versions are `4010` , `4030` , and `5010` ." + "Version": "Returns the version to use for the specified X12 transaction set." }, "AWS::B2BI::Partnership": { "Capabilities": "Returns one or more capabilities associated with this partnership.", @@ -4012,7 +4040,45 @@ }, "AWS::B2BI::Transformer X12Details": { "TransactionSet": "Returns an enumerated type where each value identifies an X12 transaction set. Transaction sets are maintained by the X12 Accredited Standards Committee.", - "Version": "Returns the version to use for the specified X12 transaction set. Supported versions are `4010` , `4030` , and `5010` ." + "Version": "Returns the version to use for the specified X12 transaction set." + }, + "AWS::BCMDataExports::Export": { + "Export": "The details that are available for an export.", + "Tags": "" + }, + "AWS::BCMDataExports::Export DataQuery": { + "QueryStatement": "The query statement.", + "TableConfigurations": "The table configuration." + }, + "AWS::BCMDataExports::Export DestinationConfigurations": { + "S3Destination": "An object that describes the destination of the data exports file." + }, + "AWS::BCMDataExports::Export Export": { + "DataQuery": "The data query for this specific data export.", + "Description": "The description for this specific data export.", + "DestinationConfigurations": "The destination configuration for this specific data export.", + "ExportArn": "The Amazon Resource Name (ARN) for this export.", + "Name": "The name of this specific data export.", + "RefreshCadence": "The cadence for AWS to update the export in your S3 bucket." + }, + "AWS::BCMDataExports::Export RefreshCadence": { + "Frequency": "The frequency that data exports are updated. The export refreshes each time the source data updates, up to three times daily." + }, + "AWS::BCMDataExports::Export ResourceTag": { + "Key": "The key that's associated with the tag.", + "Value": "The value that's associated with the tag." 
+ }, + "AWS::BCMDataExports::Export S3Destination": { + "S3Bucket": "The name of the Amazon S3 bucket used as the destination of a data export file.", + "S3OutputConfigurations": "The output configuration for the data export.", + "S3Prefix": "The S3 path prefix you want prepended to the name of your data export.", + "S3Region": "The S3 bucket Region." + }, + "AWS::BCMDataExports::Export S3OutputConfigurations": { + "Compression": "The compression type for the data export.", + "Format": "The file format for the data export.", + "OutputType": "The output type for the data export.", + "Overwrite": "The rule to follow when generating a version of the data export file. You have the choice to overwrite the previous version or to be delivered in addition to the previous versions. Overwriting exports can save on Amazon S3 storage costs. Creating new export versions allows you to track the changes in cost and usage data over time." }, "AWS::Backup::BackupPlan": { "BackupPlan": "Uniquely identifies the backup plan to be associated with the selection of resources.", @@ -4151,7 +4217,7 @@ "Algorithm": "Acceptable values include \"LATEST_WITHIN_WINDOW\" or \"RANDOM_WITHIN_WINDOW\"", "ExcludeVaults": "Accepted values include specific ARNs or list of selectors. Defaults to empty list if not listed.", "IncludeVaults": "Accepted values include wildcard [\"*\"] or by specific ARNs or ARN wilcard replacement [\"arn:aws:backup:us-west-2:123456789012:backup-vault:asdf\", ...] [\"arn:aws:backup:*:*:backup-vault:asdf-*\", ...]", - "RecoveryPointTypes": "These are the types of recovery points.", + "RecoveryPointTypes": "These are the types of recovery points.\n\nInclude `SNAPSHOT` to restore only snapshot recovery points; include `CONTINUOUS` to restore continuous recovery points (point in time restore / PITR); use both to restore either a snapshot or a continuous recovery point. The recovery point will be determined by the value for `Algorithm` .", "SelectionWindowDays": "Accepted values are integers from 1 to 365." }, "AWS::Backup::RestoreTestingPlan Tag": { @@ -4961,7 +5027,8 @@ }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleCustom": { "AllowedAnalyses": "The ARN of the analysis templates that are allowed by the custom analysis rule.", - "AllowedAnalysisProviders": "The IDs of the AWS accounts that are allowed to query by the custom analysis rule. Required when `allowedAnalyses` is `ANY_QUERY` ." + "AllowedAnalysisProviders": "The IDs of the AWS accounts that are allowed to query by the custom analysis rule. Required when `allowedAnalyses` is `ANY_QUERY` .", + "DifferentialPrivacy": "The differential privacy configuration." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleList": { "AllowedJoinOperators": "The logical operators (if any) that are to be used in an INNER JOIN match condition. Default is `AND` .", @@ -4976,6 +5043,12 @@ "Custom": "Analysis rule type that enables custom SQL queries on a configured table.", "List": "Analysis rule type that enables only list queries on a configured table." }, + "AWS::CleanRooms::ConfiguredTable DifferentialPrivacy": { + "Columns": "" + }, + "AWS::CleanRooms::ConfiguredTable DifferentialPrivacyColumn": { + "Name": "The name of the column, such as user_id, that contains the unique identifier of your users, whose privacy you want to protect. If you want to turn on differential privacy for two or more tables in a collaboration, you must configure the same column as the user identifier column in both analysis rules." 
+ }, "AWS::CleanRooms::ConfiguredTable GlueTableReference": { "DatabaseName": "The name of the database the AWS Glue table belongs to.", "TableName": "The name of the AWS Glue table." @@ -5887,6 +5960,7 @@ "StartTime": "The start time of the range to exclude. The format is `yyyy-MM-dd'T'HH:mm:ss` . For example, `2019-07-01T23:59:59` ." }, "AWS::CloudWatch::AnomalyDetector SingleMetricAnomalyDetector": { + "AccountId": "If the CloudWatch metric that provides the time series that the anomaly detector uses as input is in another account, specify that account ID here. If you omit this parameter, the current account is used.", "Dimensions": "The metric dimensions to create the anomaly detection model for.", "MetricName": "The name of the metric to create the anomaly detection model for.", "Namespace": "The namespace of the metric to create the anomaly detection model for.", @@ -6096,7 +6170,7 @@ }, "AWS::CodeBuild::Project ProjectSourceVersion": { "SourceIdentifier": "An identifier for a source in the build project. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.", - "SourceVersion": "The source version for the corresponding source identifier. If specified, must be one of:\n\n- For CodeCommit: the commit ID, branch, or Git tag to use.\n- For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format `pr/pull-request-ID` (for example, `pr/25` ). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Amazon S3: the version ID of the object that represents the build input ZIP file to use.\n\nFor more information, see [Source Version Sample with CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) in the *AWS CodeBuild User Guide* ." + "SourceVersion": "The source version for the corresponding source identifier. If specified, must be one of:\n\n- For CodeCommit: the commit ID, branch, or Git tag to use.\n- For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format `pr/pull-request-ID` (for example, `pr/25` ). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Amazon S3: the version ID of the object that represents the build input ZIP file to use.\n\nFor more information, see [Source Version Sample with CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) in the *AWS CodeBuild User Guide* ." }, "AWS::CodeBuild::Project ProjectTriggers": { "BuildType": "Specifies the type of build this webhook will trigger. 
Allowed values are:\n\n- **BUILD** - A single build\n- **BUILD_BATCH** - A batch build", @@ -6119,10 +6193,10 @@ "GitCloneDepth": "The depth of history to download. Minimum value is 0. If this value is 0, greater than 25, or not provided, then the full history is downloaded with each build project. If your source type is Amazon S3, this value is not supported.", "GitSubmodulesConfig": "Information about the Git submodules configuration for the build project.", "InsecureSsl": "This is used with GitHub Enterprise only. Set to true to ignore SSL warnings while connecting to your GitHub Enterprise project repository. The default value is `false` . `InsecureSsl` should be used for testing purposes only. It should not be used in a production environment.", - "Location": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", + "Location": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. 
This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in a CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in a GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeStar Connections *Create GitLab connection* page, choose *Connect to GitLab* . (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", "ReportBuildStatus": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an `invalidInputException` is thrown.", "SourceIdentifier": "An identifier for this project source. 
The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.", - "Type": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub or GitHub Enterprise Cloud repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket." + "Type": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in a CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `GITLAB` : The source code is in a GitLab repository.\n- `GITLAB_SELF_MANAGED` : The source code is in a self-managed GitLab repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket." }, "AWS::CodeBuild::Project SourceAuth": { "Resource": "The resource value that applies to the specified authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", @@ -6166,8 +6240,8 @@ "Value": "The tag's value." }, "AWS::CodeBuild::SourceCredential": { - "AuthType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or PERSONAL_ACCESS_TOKEN.", - "ServerType": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.", + "AuthType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", + "ServerType": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET.", "Token": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is the app password.", "Username": "The Bitbucket username when the `authType` is BASIC_AUTH. This parameter is not valid for other types of source providers or connections." }, @@ -6597,10 +6671,12 @@ "AWS::CodeStarConnections::SyncConfiguration": { "Branch": "The branch associated with a specific sync configuration.", "ConfigFile": "The file path to the configuration file associated with a specific sync configuration. The path should point to an actual file in the sync configurations linked repository.", + "PublishDeploymentStatus": "Whether to enable or disable publishing of deployment status to source providers.", "RepositoryLinkId": "The ID of the repository link associated with a specific sync configuration.", "ResourceName": "The name of the connection resource associated with a specific sync configuration.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role associated with a specific sync configuration.", - "SyncType": "The type of sync for a specific sync configuration." 
+ "SyncType": "The type of sync for a specific sync configuration.", + "TriggerResourceUpdateOn": "When to trigger Git sync to begin the stack update." }, "AWS::CodeStarNotifications::NotificationRule": { "CreatedBy": "The name or email alias of the person who created the notification rule.", @@ -8477,17 +8553,17 @@ "AWS::DMS::Endpoint PostgreSqlSettings": { "AfterConnectScript": "For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.\n\nExample: `afterConnectScript=SET session_replication_role='replica'`", "BabelfishDatabaseName": "The Babelfish for Aurora PostgreSQL database name for the endpoint.", - "CaptureDdls": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", + "CaptureDdls": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nThe default value is `true` .\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", "DatabaseMode": "Specifies the default behavior of the replication's handling of PostgreSQL- compatible endpoints that require some additional configuration, such as Babelfish endpoints.", - "DdlArtifactsSchema": "The schema in which the operational DDL database artifacts are created.\n\nExample: `ddlArtifactsSchema=xyzddlschema;`", + "DdlArtifactsSchema": "The schema in which the operational DDL database artifacts are created.\n\nThe default value is `public` .\n\nExample: `ddlArtifactsSchema=xyzddlschema;`", "ExecuteTimeout": "Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds.\n\nExample: `executeTimeout=100;`", - "FailTasksOnLobTruncation": "When set to `true` , this value causes a task to fail if the actual size of a LOB column is greater than the specified `LobMaxSize` .\n\nIf task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.", - "HeartbeatEnable": "The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps `restart_lsn` moving and prevents storage full scenarios.", - "HeartbeatFrequency": "Sets the WAL heartbeat frequency (in minutes).", - "HeartbeatSchema": "Sets the schema in which the heartbeat artifacts are created.", - "MapBooleanAsBoolean": "When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as `varchar(5)` . 
You must set this setting on both the source and target endpoints for it to take effect.", - "MaxFileSize": "Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.\n\nExample: `maxFileSize=512`", - "PluginName": "Specifies the plugin to use to create a replication slot.", + "FailTasksOnLobTruncation": "When set to `true` , this value causes a task to fail if the actual size of a LOB column is greater than the specified `LobMaxSize` .\n\nThe default value is `false` .\n\nIf task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.", + "HeartbeatEnable": "The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps `restart_lsn` moving and prevents storage full scenarios.\n\nThe default value is `false` .", + "HeartbeatFrequency": "Sets the WAL heartbeat frequency (in minutes).\n\nThe default value is 5 minutes.", + "HeartbeatSchema": "Sets the schema in which the heartbeat artifacts are created.\n\nThe default value is `public` .", + "MapBooleanAsBoolean": "When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as `varchar(5)` . You must set this setting on both the source and target endpoints for it to take effect.\n\nThe default value is `false` .", + "MaxFileSize": "Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.\n\nThe default value is 32,768 KB (32 MB).\n\nExample: `maxFileSize=512`", + "PluginName": "Specifies the plugin to use to create a replication slot.\n\nThe default value is `pglogical` .", "SecretsManagerAccessRoleArn": "The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in `SecretsManagerSecret` . The role must allow the `iam:PassRole` action. `SecretsManagerSecret` has the value of the AWS Secrets Manager secret that allows access to the PostgreSQL endpoint.\n\n> You can specify one of two sets of values for these permissions. You can specify the values for this setting and `SecretsManagerSecretId` . Or you can specify clear-text values for `UserName` , `Password` , `ServerName` , and `Port` . You can't specify both.\n> \n> For more information on creating this `SecretsManagerSecret` , the corresponding `SecretsManagerAccessRoleArn` , and the `SecretsManagerSecretId` that is required to access it, see [Using secrets to access AWS Database Migration Service resources](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) in the *AWS Database Migration Service User Guide* .", "SecretsManagerSecretId": "The full ARN, partial ARN, or display name of the `SecretsManagerSecret` that contains the PostgreSQL endpoint connection details.", "SlotName": "Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the PostgreSQL source instance.\n\nWhen used with the `CdcStartPosition` request parameter for the AWS DMS API , this attribute also makes it possible to use native CDC start points. DMS verifies that the specified logical replication slot exists before starting the CDC load task. It also verifies that the task was created with a valid setting of `CdcStartPosition` . 
If the specified slot doesn't exist or the task doesn't have a valid `CdcStartPosition` setting, DMS raises an error.\n\nFor more information about setting the `CdcStartPosition` request parameter, see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native) in the *AWS Database Migration Service User Guide* . For more information about using `CdcStartPosition` , see [CreateReplicationTask](https://docs.aws.amazon.com/dms/latest/APIReference/API_CreateReplicationTask.html) , [StartReplicationTask](https://docs.aws.amazon.com/dms/latest/APIReference/API_StartReplicationTask.html) , and [ModifyReplicationTask](https://docs.aws.amazon.com/dms/latest/APIReference/API_ModifyReplicationTask.html) ." @@ -9854,11 +9930,14 @@ "AdminUserName": "The name of the Amazon DocumentDB elastic clusters administrator.\n\n*Constraints* :\n\n- Must be from 1 to 63 letters or numbers.\n- The first character must be a letter.\n- Cannot be a reserved word.", "AdminUserPassword": "The password for the Elastic DocumentDB cluster administrator and can contain any printable ASCII characters.\n\n*Constraints* :\n\n- Must contain from 8 to 100 characters.\n- Cannot contain a forward slash (/), double quote (\"), or the \"at\" symbol (@).\n- A valid `AdminUserName` entry is also required.", "AuthType": "The authentication type used to determine where to fetch the password used for accessing the elastic cluster. Valid types are `PLAIN_TEXT` or `SECRET_ARN` .", + "BackupRetentionPeriod": "The number of days for which automatic snapshots are retained.", "ClusterName": "The name of the new elastic cluster. This parameter is stored as a lowercase string.\n\n*Constraints* :\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- The first character must be a letter.\n- Cannot end with a hyphen or contain two consecutive hyphens.\n\n*Example* : `my-cluster`", "KmsKeyId": "The KMS key identifier to use to encrypt the new elastic cluster.\n\nThe KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a cluster using the same Amazon account that owns this KMS encryption key, you can use the KMS key alias instead of the ARN as the KMS encryption key.\n\nIf an encryption key is not specified, Amazon DocumentDB uses the default encryption key that KMS creates for your account. Your account has a different default encryption key for each Amazon Region.", + "PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, as determined by `backupRetentionPeriod` .", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\n*Format* : `ddd:hh24:mi-ddd:hh24:mi`\n\n*Default* : a 30-minute window selected at random from an 8-hour block of time for each AWS Region , occurring on a random day of the week.\n\n*Valid days* : Mon, Tue, Wed, Thu, Fri, Sat, Sun\n\n*Constraints* : Minimum 30-minute window.", "ShardCapacity": "The number of vCPUs assigned to each elastic cluster shard. Maximum is 64. Allowed values are 2, 4, 8, 16, 32, 64.", "ShardCount": "The number of shards assigned to the elastic cluster. Maximum is 32.", + "ShardInstanceCount": "The number of replica instances applying to all shards in the cluster. 
A `shardInstanceCount` value of 1 means there is one writer instance, and any additional instances are replicas that can be used for reads and to improve availability.", "SubnetIds": "The Amazon EC2 subnet IDs for the new elastic cluster.", "Tags": "The tags to be assigned to the new elastic cluster.", "VpcSecurityGroupIds": "A list of EC2 VPC security groups to associate with the new elastic cluster." @@ -10557,7 +10636,7 @@ "SecurityGroups": "[Default VPC] The names of the security groups. For a nondefault VPC, you must use security group IDs instead.\n\nYou cannot specify this option and the network interfaces option in the same request. The list can contain both the name of existing Amazon EC2 security groups or references to AWS::EC2::SecurityGroup resources created in the template.\n\nDefault: Amazon EC2 uses the default security group.", "SourceDestCheck": "Enable or disable source/destination checks, which ensure that the instance is either the source or the destination of any traffic that it receives. If the value is `true` , source/destination checks are enabled; otherwise, they are disabled. The default value is `true` . You must disable source/destination checks if the instance runs services such as network address translation, routing, or firewalls.", "SsmAssociations": "The SSM [document](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ssm-document.html) and parameter values in AWS Systems Manager to associate with this instance. To use this property, you must specify an IAM instance profile role for the instance. For more information, see [Create an IAM instance profile for Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-configuring-access-role.html) in the *AWS Systems Manager User Guide* .\n\n> You can associate only one document with an instance.", - "SubnetId": "The ID of the subnet to launch the instance into.\n\nIf you specify a network interface, you must specify any subnets as part of the network interface.", + "SubnetId": "The ID of the subnet to launch the instance into.\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "Tags": "The tags to add to the instance. These tags are not applied to the EBS volumes, such as the root volume, unless [PropagateTagsToVolumeOnCreation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html#cfn-ec2-instance-propagatetagstovolumeoncreation) is `true` .", "Tenancy": "The tenancy of the instance. An instance with a tenancy of `dedicated` runs on single-tenant hardware.", "UserData": "The parameters or scripts to store as user data. Any scripts in user data are run when you launch the instance. User data is limited to 16 KB. You must provide base64-encoded text. For more information, see [Fn::Base64](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-base64.html) .\n\nIf the root volume is an EBS volume and you update user data, CloudFormation restarts the instance. If the root volume is an instance store volume and you update user data, the instance is replaced.", @@ -10626,7 +10705,7 @@ "PrivateIpAddress": "The private IPv4 address of the network interface. Applies only if creating a network interface when launching an instance.", "PrivateIpAddresses": "One or more private IPv4 addresses to assign to the network interface. 
Only one private IPv4 address can be designated as primary.", "SecondaryPrivateIpAddressCount": "The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option.", - "SubnetId": "The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance." + "SubnetId": "The ID of the subnet associated with the network interface." }, "AWS::EC2::Instance PrivateDnsNameOptions": { "EnableResourceNameDnsAAAARecord": "Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. For more information, see [Amazon EC2 instance hostname types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html) in the *Amazon Elastic Compute Cloud User Guide* .", @@ -10816,12 +10895,12 @@ "MaintenanceOptions": "The maintenance options of your instance.", "MetadataOptions": "The metadata options for the instance. For more information, see [Instance metadata and user data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) in the *Amazon Elastic Compute Cloud User Guide* .", "Monitoring": "The monitoring for the instance.", - "NetworkInterfaces": "One or more network interfaces. If you specify a network interface, you must specify any security groups and subnets as part of the network interface.", + "NetworkInterfaces": "The network interfaces for the instance.", "Placement": "The placement for the instance.", "PrivateDnsNameOptions": "The hostname type for EC2 instances launched into this subnet and how DNS A and AAAA record queries should be handled. For more information, see [Amazon EC2 instance hostname types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html) in the *Amazon Elastic Compute Cloud User Guide* .", "RamDiskId": "The ID of the RAM disk.\n\n> We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [User provided kernels](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the *Amazon Elastic Compute Cloud User Guide* .", - "SecurityGroupIds": "The IDs of the security groups. You can specify the IDs of existing security groups and references to resources created by the stack template.", - "SecurityGroups": "One or more security group names. For a nondefault VPC, you must use security group IDs instead.", + "SecurityGroupIds": "The IDs of the security groups. You can specify the IDs of existing security groups and references to resources created by the stack template.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead.", + "SecurityGroups": "The names of the security groups. For a nondefault VPC, you must use security group IDs instead.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter.", "TagSpecifications": "The tags to apply to the resources that are created during instance launch.\n\nTo tag a resource after it has been created, see [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) .\n\nTo tag the launch template itself, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", "UserData": "The user data to make available to the instance. 
You must provide base64-encoded text. User data is limited to 16 KB. For more information, see [Run commands on your Linux instance at launch](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) (Linux) or [Work with instance user data](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/instancedata-add-user-data.html) (Windows) in the *Amazon Elastic Compute Cloud User Guide* .\n\nIf you are creating the launch template for use with AWS Batch , the user data must be provided in the [MIME multi-part archive format](https://docs.aws.amazon.com/https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive) . For more information, see [Amazon EC2 user data in launch templates](https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html) in the *AWS Batch User Guide* ." }, @@ -10837,8 +10916,7 @@ "LicenseConfigurationArn": "The Amazon Resource Name (ARN) of the license configuration." }, "AWS::EC2::LaunchTemplate MaintenanceOptions": { - "AutoRecovery": "Disables the automatic recovery behavior of your instance or sets it to default.", - "RebootMigration": "" + "AutoRecovery": "Disables the automatic recovery behavior of your instance or sets it to default." }, "AWS::EC2::LaunchTemplate MemoryGiBPerVCpu": { "Max": "The maximum amount of memory per vCPU, in GiB. To specify no maximum limit, omit this parameter.", @@ -11469,7 +11547,7 @@ "NetworkInterfaceId": "The ID of the network interface.\n\nIf you are creating a Spot Fleet, omit this parameter because you can\u2019t specify a network interface ID in a launch specification.", "PrivateIpAddresses": "The private IPv4 addresses to assign to the network interface. Only one private IPv4 address can be designated as primary. You cannot specify this option if you're launching more than one instance in a [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) request.", "SecondaryPrivateIpAddressCount": "The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option. You cannot specify this option if you're launching more than one instance in a [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) request.", - "SubnetId": "The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance." + "SubnetId": "The ID of the subnet associated with the network interface." }, "AWS::EC2::SpotFleet InstanceRequirementsRequest": { "AcceleratorCount": "The minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips) on an instance.\n\nTo exclude accelerator-enabled instance types, set `Max` to `0` .\n\nDefault: No minimum or maximum limits", @@ -11548,12 +11626,12 @@ "KernelId": "The ID of the kernel.", "KeyName": "The name of the key pair.", "Monitoring": "Enable or disable monitoring for the instances.", - "NetworkInterfaces": "One or more network interfaces. If you specify a network interface, you must specify subnet IDs and security group IDs using the network interface.\n\n> `SpotFleetLaunchSpecification` currently does not support Elastic Fabric Adapter (EFA). To specify an EFA, you must use [LaunchTemplateConfig](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html) .", + "NetworkInterfaces": "The network interfaces.", "Placement": "The placement information.", "RamdiskId": "The ID of the RAM disk. 
Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, refer to the AWS Resource Center and search for the kernel ID.", - "SecurityGroups": "The security groups.", + "SecurityGroups": "The security groups.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter.", "SpotPrice": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.\n\n> If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.", - "SubnetId": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".", + "SubnetId": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "TagSpecifications": "The tags to apply during creation.", "UserData": "The base64-encoded user data that instances use when starting up. User data is limited to 16 KB.", "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1." @@ -11622,6 +11700,7 @@ "AvailabilityZoneId": "The AZ ID of the subnet.", "CidrBlock": "The IPv4 CIDR block assigned to the subnet.\n\nIf you update this property, we create a new subnet, and then delete the existing one.", "EnableDns64": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "EnableLniAtDeviceIndex": "Indicates the device position for local network interfaces in this subnet. For example, `1` indicates local network interfaces in this subnet are the secondary network interface (eth1).", "Ipv4IpamPoolId": "An IPv4 IPAM pool ID for the subnet.", "Ipv4NetmaskLength": "An IPv4 netmask length for the subnet.", "Ipv6CidrBlock": "The IPv6 CIDR block.\n\nIf you specify `AssignIpv6AddressOnCreation` , you must also specify an IPv6 CIDR block.", @@ -12285,7 +12364,7 @@ "Alarms": "Information about the CloudWatch alarms.", "DeploymentCircuitBreaker": "> The deployment circuit breaker can only be used for services using the rolling update ( `ECS` ) deployment type. \n\nThe *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. 
If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*", "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", - "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. 
The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." + "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. 
The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." }, "AWS::ECS::Service DeploymentController": { "Type": "The deployment controller type to use. There are three deployment controller types available:\n\n- **ECS** - The rolling update ( `ECS` ) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the [DeploymentConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentConfiguration.html) .\n- **CODE_DEPLOY** - The blue/green ( `CODE_DEPLOY` ) deployment type uses the blue/green deployment model powered by AWS CodeDeploy , which allows you to verify a new deployment of a service before sending production traffic to it.\n- **EXTERNAL** - The external ( `EXTERNAL` ) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service." @@ -13561,10 +13640,12 @@ }, "AWS::ElastiCache::ServerlessCache DataStorage": { "Maximum": "The upper limit for data storage the cache is set to use.", + "Minimum": "The lower limit for data storage the cache is set to use.", "Unit": "The unit that the storage is measured in, in GB." }, "AWS::ElastiCache::ServerlessCache ECPUPerSecond": { - "Maximum": "The configuration for the maximum number of ECPUs the cache can consume per second." + "Maximum": "The configuration for the maximum number of ECPUs the cache can consume per second.", + "Minimum": "The configuration for the minimum number of ECPUs the cache should be able to consume per second." }, "AWS::ElastiCache::ServerlessCache Endpoint": { "Address": "The DNS hostname of the cache node.", @@ -14133,7 +14214,8 @@ }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowInputSource": { "InputSourceARN": "An AWS Glue table ARN for the input source table.", - "SchemaArn": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` ." + "SchemaArn": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` .", + "Type": "" }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowOutputSource": { "KMSArn": "Customer AWS KMS ARN for encryption at rest.
If not provided, system will use an AWS Entity Resolution managed KMS key.", @@ -14185,7 +14267,7 @@ }, "AWS::EntityResolution::MatchingWorkflow ResolutionTechniques": { "ProviderProperties": "The properties of the provider service.", - "ResolutionType": "The type of matching. There are two types of matching: `RULE_MATCHING` and `ML_MATCHING` .", + "ResolutionType": "The type of matching. There are three types of matching: `RULE_MATCHING` , `ML_MATCHING` , and `PROVIDER` .", "RuleBasedProperties": "An object which defines the list of matching rules to run and has a field `Rules` , which is a list of rule objects." }, "AWS::EntityResolution::MatchingWorkflow Rule": { @@ -15494,7 +15576,7 @@ "TrafficDialPercentage": "The percentage of traffic to send to an AWS Regions . Additional traffic is distributed to other endpoint groups for this listener.\n\nUse this action to increase (dial up) or decrease (dial down) traffic to a specific Region. The percentage is applied to the traffic that would otherwise have been routed to the Region based on optimal routing.\n\nThe default value is 100." }, "AWS::GlobalAccelerator::EndpointGroup EndpointConfiguration": { - "AttachmentArn": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints to accelerators.", + "AttachmentArn": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints.", "ClientIPPreservationEnabled": "Indicates whether client IP address preservation is enabled for an Application Load Balancer endpoint. The value is true or false. The default value is true for new accelerators.\n\nIf the value is set to true, the client's IP address is preserved in the `X-Forwarded-For` request header as traffic travels to applications on the Application Load Balancer endpoint fronted by the accelerator.\n\nFor more information, see [Preserve Client IP Addresses](https://docs.aws.amazon.com/global-accelerator/latest/dg/preserve-client-ip-address.html) in the *AWS Global Accelerator Developer Guide* .", "EndpointId": "An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID. For Amazon EC2 instances, this is the EC2 instance ID. A resource must be valid and active when you add it as an endpoint.\n\nFor cross-account endpoints, this must be the ARN of the resource.", "Weight": "The weight associated with the endpoint. When you add weights to endpoints, you configure Global Accelerator to route traffic based on proportions that you specify. For example, you might specify endpoint weights of 4, 5, 5, and 6 (sum=20). The result is that 4/20 of your traffic, on average, is routed to the first endpoint, 5/20 is routed both to the second and third endpoints, and 6/20 is routed to the last endpoint. For more information, see [Endpoint Weights](https://docs.aws.amazon.com/global-accelerator/latest/dg/about-endpoints-endpoint-weights.html) in the *AWS Global Accelerator Developer Guide* ." 
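To make the `Weight` arithmetic above concrete, here is a minimal CloudFormation YAML sketch of an `AWS::GlobalAccelerator::EndpointGroup` using the 4, 5, 5, and 6 (sum=20) weights from the description. The listener reference, Region, and endpoint IDs are hypothetical placeholders, not values taken from this schema change.

```yaml
# Illustrative sketch only: ExampleListener, the Region, and all endpoint IDs
# below are assumed placeholder values.
Resources:
  ExampleEndpointGroup:
    Type: AWS::GlobalAccelerator::EndpointGroup
    Properties:
      ListenerArn: !Ref ExampleListener      # a listener assumed to be defined elsewhere
      EndpointGroupRegion: us-west-2
      TrafficDialPercentage: 100             # route the full dialed share to this Region
      EndpointConfigurations:
        # Weights 4 + 5 + 5 + 6 = 20, so traffic averages 4/20, 5/20, 5/20, and 6/20.
        - EndpointId: eipalloc-0example00000001   # Elastic IP allocation ID
          Weight: 4
        - EndpointId: i-0example0000000002        # EC2 instance ID
          Weight: 5
        - EndpointId: i-0example0000000003
          Weight: 5
        - EndpointId: arn:aws:elasticloadbalancing:us-west-2:111122223333:loadbalancer/app/example-alb/0123456789abcdef
          Weight: 6
          ClientIPPreservationEnabled: true       # supported for Application Load Balancer endpoints
```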
@@ -17562,9 +17644,9 @@ }, "AWS::InternetMonitor::Monitor": { "HealthEventsConfig": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores,, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .", - "IncludeLinkedAccounts": "", + "IncludeLinkedAccounts": "A boolean option that you can set to `TRUE` to include monitors for linked accounts in a list of monitors, when you've set up cross-account sharing in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.", "InternetMeasurementsLogDelivery": "Publish internet measurements for a monitor for all city-networks (up to the 500,000 service limit) to another location, such as an Amazon S3 bucket. Measurements are also published to Amazon CloudWatch Logs for the first 500 (by traffic volume) city-networks (client locations and ASNs, typically internet service providers or ISPs).", - "LinkedAccountId": "", + "LinkedAccountId": "The account ID for an account that you've set up cross-account sharing for in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.", "MaxCityNetworksToMonitor": "The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network, such as an internet service provider, that clients access the resources through.\n\nFor more information, see [Choosing a city-network maximum value](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/IMCityNetworksMaximum.html) in *Using Amazon CloudWatch Internet Monitor* .", "MonitorName": "The name of the monitor. A monitor name can contain only alphanumeric characters, dashes (-), periods (.), and underscores (_).", "Resources": "The resources that have been added for the monitor, listed by their Amazon Resource Names (ARNs). 
Use this option to add or remove resources when making an update.\n\n> Be aware that if you include content in the `Resources` field when you update a monitor, the `ResourcesToAdd` and `ResourcesToRemove` fields must be empty.", @@ -19181,27 +19263,27 @@ "AWS::IoTSiteWise::AssetModel AssetModelCompositeModel": { "ComposedAssetModelId": "The ID of a component model which is reused to create this composite model.", "CompositeModelProperties": "The asset property definitions for this composite model.", - "Description": "The description of the composite model.", - "ExternalId": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "Description": "The description of the composite model.\n\n> If the composite model is a `component-model-based` composite model, the description is inherited from the `COMPONENT_MODEL` asset model and cannot be changed.", + "ExternalId": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `Path` must be specified.", "Id": "The ID of the asset model composite model.\n\n> This is a return value and can't be set.", "Name": "The name of the composite model.", - "ParentAssetModelCompositeModelExternalId": "The external ID of the parent asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> If `ParentCompositeModelExternalId` is specified, this value overrides the value of `ExternalId` , if both are included.", - "Path": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.", + "ParentAssetModelCompositeModelExternalId": "The external ID of the parent composite model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "Path": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.\n\n> One of `ExternalId` or `Path` must be specified.", "Type": "The type of the composite model. For alarm composite models, this type is `AWS/ALARM` ." }, "AWS::IoTSiteWise::AssetModel AssetModelHierarchy": { "ChildAssetModelId": "The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the `childAssetModelId` asset model. AWS IoT SiteWise will always return the actual asset model ID for this value. However, when you are specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) , you may provide either the asset model ID or else `externalId:` followed by the asset model's external ID. 
For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", - "ExternalId": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "ExternalId": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "Id": "The ID of the asset model hierarchy. This ID is a `hierarchyId` .\n\n> This is a return value and can't be set. \n\n- If you are callling [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) to create a *new* hierarchy: You can specify its ID here, if desired. AWS IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique.\n- If you are calling UpdateAssetModel to modify an *existing* hierarchy: This can be either the actual ID in UUID format, or else `externalId:` followed by the external ID, if it has one. For more information, see [Referencing objects with external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-id-references) in the *AWS IoT SiteWise User Guide* .", - "LogicalId": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", + "LogicalId": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "Name": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation." }, "AWS::IoTSiteWise::AssetModel AssetModelProperty": { "DataType": "The data type of the asset model property.", "DataTypeSpec": "The data type of the structure for this property. This parameter exists on properties that have the `STRUCT` data type.", - "ExternalId": "The external ID of the asset property. 
For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "ExternalId": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "Id": "The ID of the property.\n\n> This is a return value and can't be set.", - "LogicalId": "The `LogicalID` of the asset model property.", + "LogicalId": "The `LogicalID` of the asset model property.\n\n> One of `ExternalId` or `LogicalId` must be specified.", "Name": "The name of the asset model property.", "Type": "Contains a property type, which can be one of `attribute` , `measurement` , `metric` , or `transform` .", "Unit": "The unit of the asset model property, such as `Newtons` or `RPM` ." @@ -20004,14 +20086,14 @@ "DatabaseHost": "The name of the host for the database. Can be either a string (host.subdomain.domain.tld) or an IPv4 or IPv6 address.", "DatabaseName": "The name of the database containing the document data.", "DatabasePort": "The port that the database uses for connections.", - "SecretArn": "The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager . The credentials should be a user/password pair. For more information, see [Using a Database Data Source](https://docs.aws.amazon.com/kendra/latest/dg/data-source-database.html) . For more information about AWS Secrets Manager , see [What Is AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) in the *AWS Secrets Manager* user guide.", + "SecretArn": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that stores the credentials. The credentials should be a user-password pair. For more information, see [Using a Database Data Source](https://docs.aws.amazon.com/kendra/latest/dg/data-source-database.html) . For more information about AWS Secrets Manager , see [What Is AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) in the *AWS Secrets Manager* user guide.", "TableName": "The name of the table that contains the document data." }, "AWS::Kendra::DataSource CustomDocumentEnrichmentConfiguration": { "InlineConfigurations": "Configuration information to alter document attributes or metadata fields and content when ingesting documents into Amazon Kendra.", "PostExtractionHookConfiguration": "Configuration information for invoking a Lambda function in AWS Lambda on the structured documents with their metadata and text extracted. You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. For more information, see [Advanced data manipulation](https://docs.aws.amazon.com/kendra/latest/dg/custom-document-enrichment.html#advanced-data-manipulation) .", "PreExtractionHookConfiguration": "Configuration information for invoking a Lambda function in AWS Lambda on the original or raw documents before extracting their metadata and text. You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. 
For more information, see [Advanced data manipulation](https://docs.aws.amazon.com/kendra/latest/dg/custom-document-enrichment.html#advanced-data-manipulation) .", - "RoleArn": "The Amazon Resource Name (ARN) of a role with permission to run `PreExtractionHookConfiguration` and `PostExtractionHookConfiguration` for altering document metadata and content during the document ingestion process. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) ." + "RoleArn": "The Amazon Resource Name (ARN) of an IAM role with permission to run `PreExtractionHookConfiguration` and `PostExtractionHookConfiguration` for altering document metadata and content during the document ingestion process. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) ." }, "AWS::Kendra::DataSource DataSourceConfiguration": { "ConfluenceConfiguration": "Provides the configuration information to connect to Confluence as your data source.", @@ -20072,7 +20154,7 @@ }, "AWS::Kendra::DataSource HookConfiguration": { "InvocationCondition": "The condition used for when a Lambda function should be invoked.\n\nFor example, you can specify a condition that if there are empty date-time values, then Amazon Kendra should invoke a function that inserts the current date-time.", - "LambdaArn": "The Amazon Resource Name (ARN) of a role with permission to run a Lambda function during ingestion. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", + "LambdaArn": "The Amazon Resource Name (ARN) of an IAM role with permission to run a Lambda function during ingestion. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", "S3Bucket": "Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see [Data contracts for Lambda functions](https://docs.aws.amazon.com/kendra/latest/dg/custom-document-enrichment.html#cde-data-contracts-lambda) ." }, "AWS::Kendra::DataSource InlineCustomDocumentEnrichmentConfiguration": { @@ -20090,11 +20172,11 @@ "TenantDomain": "The Azure Active Directory domain of the organization." }, "AWS::Kendra::DataSource OneDriveUsers": { - "OneDriveUserList": "A list of users whose documents should be indexed. Specify the user names in email format, for example, `username@tenantdomain` . If you need to index the documents of more than 100 users, use the `OneDriveUserS3Path` field to specify the location of a file containing a list of users.", + "OneDriveUserList": "A list of users whose documents should be indexed. Specify the user names in email format, for example, `username@tenantdomain` . If you need to index the documents of more than 10 users, use the `OneDriveUserS3Path` field to specify the location of a file containing a list of users.", "OneDriveUserS3Path": "The S3 bucket location of a file containing a list of users whose documents should be indexed." }, "AWS::Kendra::DataSource ProxyConfiguration": { - "Credentials": "Your secret ARN, which you can create in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nThe credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently support basic authentication to connect to a web proxy server.
The secret stores your credentials.", + "Credentials": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret. You create a secret to store your credentials in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) .\n\nThe credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently supports basic authentication to connect to a web proxy server. The secret stores your credentials.", "Host": "The name of the website host you want to connect to via a web proxy server.\n\nFor example, the host name of https://a.example.com/page1.html is \"a.example.com\".", "Port": "The port number of the website host you want to connect to via a web proxy server.\n\nFor example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS." }, @@ -20196,14 +20278,14 @@ "QueryIdentifiersEnclosingOption": "Determines whether Amazon Kendra encloses SQL identifiers for tables and column names in double quotes (\") when making a database query. You can set the value to `DOUBLE_QUOTES` or `NONE` .\n\nBy default, Amazon Kendra passes SQL identifiers the way that they are entered into the data source configuration. It does not change the case of identifiers or enclose them in quotes.\n\nPostgreSQL internally converts uppercase characters to lower case characters in identifiers unless they are quoted. Choosing this option encloses identifiers in quotes so that PostgreSQL does not convert the character's case.\n\nFor MySQL databases, you must enable the ansi_quotes option when you set this field to `DOUBLE_QUOTES` ." }, "AWS::Kendra::DataSource Tag": { - "Key": "The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, or data source.", + "Key": "The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, data source, or other resource.", "Value": "The value associated with the tag. The value may be an empty string but it can't be null." }, "AWS::Kendra::DataSource WebCrawlerAuthenticationConfiguration": { "BasicAuthentication": "The list of configuration information that's required to connect to and crawl a website host using basic authentication credentials.\n\nThe list includes the name and port number of the website host." }, "AWS::Kendra::DataSource WebCrawlerBasicAuthentication": { - "Credentials": "Your secret ARN, which you can create in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nYou use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.", + "Credentials": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret. You create a secret to store your credentials in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) .\n\nYou use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.", "Host": "The name of the website host you want to connect to using authentication credentials.\n\nFor example, the host name of https://a.example.com/page1.html is \"a.example.com\".", "Port": "The port number of the website host you want to connect to using authentication credentials.\n\nFor example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS." }, @@ -20252,7 +20334,7 @@ "Key": "The name of the file."
}, "AWS::Kendra::Faq Tag": { - "Key": "The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, or data source.", + "Key": "The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, data source, or other resource.", "Value": "The value associated with the tag. The value may be an empty string but it can't be null." }, "AWS::Kendra::Index": { @@ -20307,7 +20389,7 @@ "KmsKeyId": "The identifier of the AWS KMS key . Amazon Kendra doesn't support asymmetric keys." }, "AWS::Kendra::Index Tag": { - "Key": "The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, or data source.", + "Key": "The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, data source, or other resource.", "Value": "The value associated with the tag. The value may be an empty string but it can't be null." }, "AWS::Kendra::Index UserTokenConfiguration": { @@ -23089,7 +23171,7 @@ }, "AWS::ManagedBlockchain::Node": { "MemberId": "The unique identifier of the member to which the node belongs. Applies only to Hyperledger Fabric.", - "NetworkId": "The unique identifier of the network for the node.\n\nEthereum public networks have the following `NetworkId` s:\n\n- `n-ethereum-mainnet`\n- `n-ethereum-goerli`", + "NetworkId": "The unique identifier of the network for the node.\n\nEthereum public networks have the following `NetworkId` s:\n\n- `n-ethereum-mainnet`", "NodeConfiguration": "Configuration properties of a peer node." }, "AWS::ManagedBlockchain::Node NodeConfiguration": { @@ -25823,7 +25905,7 @@ }, "AWS::Oam::Link": { "LabelTemplate": "Specify a friendly human-readable name to use to identify this source account when you are viewing data from it in the monitoring account.\n\nYou can include the following variables in your template:\n\n- `$AccountName` is the name of the account\n- `$AccountEmail` is a globally-unique email address, which includes the email domain, such as `mariagarcia@example.com`\n- `$AccountEmailNoDomain` is an email address without the domain name, such as `mariagarcia`", - "ResourceTypes": "An array of strings that define which types of data that the source account shares with the monitoring account. Valid values are `AWS::CloudWatch::Metric | AWS::Logs::LogGroup | AWS::XRay::Trace | AWS::ApplicationInsights::Application` .", + "ResourceTypes": "An array of strings that define which types of data that the source account shares with the monitoring account. Valid values are `AWS::CloudWatch::Metric | AWS::Logs::LogGroup | AWS::XRay::Trace | AWS::ApplicationInsights::Application | AWS::InternetMonitor::Monitor` .", "SinkIdentifier": "The ARN of the sink in the monitoring account that you want to link to. You can use [ListSinks](https://docs.aws.amazon.com/OAM/latest/APIReference/API_ListSinks.html) to find the ARNs of sinks.", "Tags": "An array of key-value pairs to apply to the link.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." 
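With `AWS::InternetMonitor::Monitor` now accepted in `ResourceTypes` , a source-account `AWS::Oam::Link` that shares monitors alongside metrics and log groups might look like the following minimal CloudFormation YAML sketch; the sink ARN is a hypothetical placeholder.

```yaml
# Illustrative sketch only: the sink ARN below is an assumed placeholder.
Resources:
  SourceAccountLink:
    Type: AWS::Oam::Link
    Properties:
      LabelTemplate: "$AccountName"          # friendly label shown in the monitoring account
      ResourceTypes:
        - AWS::CloudWatch::Metric
        - AWS::Logs::LogGroup
        - AWS::InternetMonitor::Monitor      # newly added valid value
      SinkIdentifier: arn:aws:oam:us-east-1:111122223333:sink/EXAMPLE-1234-5678-SINK
```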
}, @@ -27412,7 +27494,7 @@ "KinesisStreamParameters": "The parameters for using a Kinesis stream as a source.", "ManagedStreamingKafkaParameters": "The parameters for using an MSK stream as a source.", "RabbitMQBrokerParameters": "The parameters for using a Rabbit MQ broker as a source.", - "SelfManagedKafkaParameters": "The parameters for using a stream as a source.", + "SelfManagedKafkaParameters": "The parameters for using a stream as a source.\n\nA *self managed* cluster refers to any Apache Kafka cluster not hosted by AWS . This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as [Confluent Cloud](https://docs.aws.amazon.com/https://www.confluent.io/) , [CloudKarafka](https://docs.aws.amazon.com/https://www.cloudkarafka.com/) , or [Redpanda](https://docs.aws.amazon.com/https://redpanda.com/) . For more information, see [Apache Kafka streams as a source](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kafka.html) in the *Amazon EventBridge User Guide* .", "SqsQueueParameters": "The parameters for using a Amazon SQS stream as a source." }, "AWS::Pipes::Pipe PipeSourceRabbitMQBrokerParameters": { @@ -36017,8 +36099,8 @@ "ReplicationSourceIdentifier": "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.\n\nValid for: Aurora DB clusters only", "RestoreToTime": "The date and time to restore the DB cluster to.\n\nValid Values: Value must be a time in Universal Coordinated Time (UTC) format\n\nConstraints:\n\n- Must be before the latest restorable time for the DB instance\n- Must be specified if `UseLatestRestorableTime` parameter isn't provided\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled\n- Can't be specified if the `RestoreType` parameter is `copy-on-write`\n\nThis property must be used with `SourceDBClusterIdentifier` property. The resulting cluster will have the identifier that matches the value of the `DBclusterIdentifier` property.\n\nExample: `2015-03-07T23:45:00Z`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "RestoreType": "The type of restore to be performed. You can specify one of the following values:\n\n- `full-copy` - The new DB cluster is restored as a full copy of the source DB cluster.\n- `copy-on-write` - The new DB cluster is restored as a clone of the source DB cluster.\n\nIf you don't specify a `RestoreType` value, then the new DB cluster is restored as a full copy of the source DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "ScalingConfiguration": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, Use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", - "ServerlessV2ScalingConfiguration": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, Use the `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "ScalingConfiguration": "The scaling configuration of an Aurora Serverless v1 DB cluster.\n\nThis property is only supported for Aurora Serverless v1. 
For Aurora Serverless v2, use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora Serverless v1 DB clusters only", + "ServerlessV2ScalingConfiguration": "The scaling configuration of an Aurora Serverless v2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use the `ScalingConfiguration` property.\n\nValid for: Aurora Serverless v2 DB clusters only", "SnapshotIdentifier": "The identifier for the DB snapshot or DB cluster snapshot to restore from.\n\nYou can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.\n\nAfter you restore a DB cluster with a `SnapshotIdentifier` property, you must specify the same `SnapshotIdentifier` property for any future updates to the DB cluster. When you specify this property for an update, the DB cluster is not restored from the snapshot again, and the data in the database is not changed. However, if you don't specify the `SnapshotIdentifier` property, an empty DB cluster is created, and the original DB cluster is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB cluster is restored from the specified `SnapshotIdentifier` property, and the original DB cluster is deleted.\n\nIf you specify the `SnapshotIdentifier` property to restore a DB cluster (as opposed to specifying it for DB cluster updates), then don't specify the following properties:\n\n- `GlobalClusterIdentifier`\n- `MasterUsername`\n- `MasterUserPassword`\n- `ReplicationSourceIdentifier`\n- `RestoreType`\n- `SourceDBClusterIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `UseLatestRestorableTime`\n\nConstraints:\n\n- Must match the identifier of an existing Snapshot.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "SourceDBClusterIdentifier": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .\n\nValid for: Aurora DB clusters only", @@ -36288,6 +36370,8 @@ }, "AWS::RDS::Integration": { "AdditionalEncryptionContext": "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .\n\nYou can only include this parameter if you specify the `KMSKeyId` parameter.", + "DataFilter": "Data filters for the integration. These filters determine which tables from the source database are sent to the target Amazon Redshift data warehouse.", + "Description": "A description of the integration.", "IntegrationName": "The name of the integration.", "KMSKeyId": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, RDS uses a default AWS owned key.", "SourceArn": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", @@ -38548,12 +38632,12 @@ "ContactTargetInfo": "Information about the contact that Incident Manager engages."
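Because the `ScalingConfiguration` and `ServerlessV2ScalingConfiguration` descriptions above are easy to conflate, here is a minimal CloudFormation YAML sketch of the Aurora Serverless v2 case. The engine and capacity values are assumed examples, and other required cluster properties (credentials, networking) are omitted for brevity.

```yaml
# Illustrative sketch only: engine and capacity values are assumed examples;
# required properties such as master credentials are omitted.
Resources:
  ExampleAuroraCluster:
    Type: AWS::RDS::DBCluster
    Properties:
      Engine: aurora-postgresql
      ServerlessV2ScalingConfiguration:      # Serverless v2 only; Serverless v1 uses ScalingConfiguration
        MinCapacity: 0.5                     # lowest Aurora capacity units (ACUs) to scale down to
        MaxCapacity: 8                       # upper ACU bound for the cluster
```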
}, "AWS::SSMContacts::Rotation": { - "ContactIds": "The Amazon Resource Names (ARNs) of the contacts to add to the rotation.\n\nThe order in which you list the contacts is their shift order in the rotation schedule.", + "ContactIds": "The Amazon Resource Names (ARNs) of the contacts to add to the rotation.\n\n> Only the `PERSONAL` contact type is supported. The contact types `ESCALATION` and `ONCALL_SCHEDULE` are not supported for this operation. \n\nThe order in which you list the contacts is their shift order in the rotation schedule.", "Name": "The name for the rotation.", "Recurrence": "Information about the rule that specifies when shift team members rotate.", "StartTime": "The date and time the rotation goes into effect.", "Tags": "Optional metadata to assign to the rotation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For more information, see [Tagging Incident Manager resources](https://docs.aws.amazon.com/incident-manager/latest/userguide/tagging.html) in the *Incident Manager User Guide* .", - "TimeZoneId": "The time zone to base the rotation\u2019s activity on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"UTC\", or \"Asia/Seoul\". For more information, see the [Time Zone Database](https://docs.aws.amazon.com/https://www.iana.org/time-zones) on the IANA website." + "TimeZoneId": "The time zone to base the rotation\u2019s activity on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"UTC\", or \"Asia/Seoul\". For more information, see the [Time Zone Database](https://docs.aws.amazon.com/https://www.iana.org/time-zones) on the IANA website.\n\n> Designators for time zones that don\u2019t support Daylight Savings Time rules, such as Pacific Standard Time (PST), are not supported." }, "AWS::SSMContacts::Rotation CoverageTime": { "EndTime": "Information about when an on-call rotation shift ends.", @@ -40626,15 +40710,15 @@ "ComplianceSecurityControlId": "The security control ID for which a finding was generated. Security control IDs are the same across standards.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ComplianceStatus": "The result of a security check. This field is only used for findings generated from controls.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "Confidence": "The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. `Confidence` is scored on a 0\u2013100 basis using a ratio scale. A value of `0` means 0 percent confidence, and a value of `100` means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. For more information, see [Confidence](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-top-level-attributes.html#asff-confidence) in the *AWS Security Hub User Guide* .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "CreatedAt": "A timestamp that indicates when this finding record was created.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", + "CreatedAt": "A timestamp that indicates when this finding record was created.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "Criticality": "The level of importance that is assigned to the resources that are associated with a finding. `Criticality` is scored on a 0\u2013100 basis, using a ratio scale that supports only full integers. A score of `0` means that the underlying resources have no criticality, and a score of `100` is reserved for the most critical resources. For more information, see [Criticality](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-top-level-attributes.html#asff-criticality) in the *AWS Security Hub User Guide* .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "Description": "A finding's description.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "FirstObservedAt": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "FirstObservedAt": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "GeneratorId": "The identifier for the solution-specific component that generated a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", "Id": "The product-specific identifier for a finding.\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", - "LastObservedAt": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "LastObservedAt": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "NoteText": "The text of a user-defined note that's added to a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "NoteUpdatedAt": "The timestamp of when the note was updated. Uses the date-time format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "NoteUpdatedAt": "The timestamp of when the note was updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "NoteUpdatedBy": "The principal that created a note.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ProductArn": "The Amazon Resource Name (ARN) for a third-party product that generated a finding in Security Hub.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ProductName": "Provides the name of the product that generated the finding. For control-based findings, the product name is Security Hub.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", @@ -40651,15 +40735,15 @@ "SourceUrl": "Provides a URL that links to a page about the current finding in the finding product.\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", "Title": "A finding's title.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", "Type": "One or more finding types in the format of namespace/category/classifier that classify a finding. For a list of namespaces, classifiers, and categories, see [Types taxonomy for ASFF](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-type-taxonomy.html) in the *AWS Security Hub User Guide* .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "UpdatedAt": "A timestamp that indicates when the finding record was most recently updated.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "UpdatedAt": "A timestamp that indicates when the finding record was most recently updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "UserDefinedFields": "A list of user-defined name and value string pairs added to a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "VerificationState": "Provides the veracity of a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "WorkflowStatus": "Provides information about the status of the investigation into a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items." }, "AWS::SecurityHub::AutomationRule DateFilter": { "DateRange": "A date range for the date filter.", - "End": "A timestamp that provides the end date for the date filter.\n\nA correctly formatted example is `2020-05-21T20:16:34.724Z` . The value cannot contain spaces, and date and time should be separated by `T` . For more information, see [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) .", - "Start": "A timestamp that provides the start date for the date filter.\n\nA correctly formatted example is `2020-05-21T20:16:34.724Z` . The value cannot contain spaces, and date and time should be separated by `T` . For more information, see [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) ." + "End": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
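The accepted-format list above is repeated verbatim for every timestamp filter field in these SecurityHub hunks. As a quick cross-check of those rules, a small illustrative validator, assuming Python and a regex that mirrors the five documented shapes (`Z` or a `+HH:MM` / `-HHMM` offset, at most nine fractional-second digits, offset within +/-18:00); the actual service-side parser may be stricter:

```python
import re

# Mirrors the documented shapes: date-time with an optional 1-9 digit
# fraction, ending in 'Z' or a +/-HH:MM (colon optional) offset.
TIMESTAMP_RE = re.compile(
    r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}"
    r"(\.\d{1,9})?"
    r"(Z|[+-]\d{2}:?\d{2})$"
)

def is_valid_timestamp(value: str) -> bool:
    """True if value matches an accepted format and its offset is within +/-18:00."""
    m = TIMESTAMP_RE.match(value)
    if not m:
        return False
    offset = m.group(2)
    if offset == "Z":
        return True
    hours, minutes = int(offset[1:3]), int(offset[-2:])
    return minutes < 60 and (hours, minutes) <= (18, 0)

print(is_valid_timestamp("2019-01-31T23:00:00Z"))                 # True
print(is_valid_timestamp("2024-01-04T15:25:10.123456789+17:59"))  # True
print(is_valid_timestamp("2020-03-22 13:22:13Z"))                 # False: space separator
```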
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "Start": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )" }, "AWS::SecurityHub::AutomationRule DateRange": { "Unit": "A date range unit for the date filter.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 1344dfe80..0ae42bb8f 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -1234,7 +1234,7 @@ "type": "array" }, "Workspace": { - "markdownDescription": "An Amazon Managed Service for Prometheus workspace is a logical and isolated Prometheus server dedicated to ingesting, storing, and querying your Prometheus-compatible metrics.", + "markdownDescription": "The ID of the workspace to add the rule groups namespace.", "title": "Workspace", "type": "string" } @@ -1267,6 +1267,185 @@ ], "type": "object" }, + "AWS::APS::Scraper": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Alias": { + "markdownDescription": "An optional user-assigned scraper alias.", + "title": "Alias", + "type": "string" + }, + "Destination": { + "$ref": "#/definitions/AWS::APS::Scraper.Destination", + "markdownDescription": "The Amazon Managed Service for Prometheus workspace the scraper sends metrics to.", + "title": "Destination" + }, + "ScrapeConfiguration": { + "$ref": "#/definitions/AWS::APS::Scraper.ScrapeConfiguration", + "markdownDescription": "The configuration in use by the scraper.", + "title": "ScrapeConfiguration" + }, + "Source": { + "$ref": "#/definitions/AWS::APS::Scraper.Source", + "markdownDescription": "The Amazon EKS cluster from which the scraper collects metrics.", + "title": "Source" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "(Optional) The list of tag keys and values associated with the scraper.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Destination", + "ScrapeConfiguration", + "Source" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::APS::Scraper" + ], 
+ "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::APS::Scraper.AmpConfiguration": { + "additionalProperties": false, + "properties": { + "WorkspaceArn": { + "markdownDescription": "ARN of the Amazon Managed Service for Prometheus workspace.", + "title": "WorkspaceArn", + "type": "string" + } + }, + "required": [ + "WorkspaceArn" + ], + "type": "object" + }, + "AWS::APS::Scraper.Destination": { + "additionalProperties": false, + "properties": { + "AmpConfiguration": { + "$ref": "#/definitions/AWS::APS::Scraper.AmpConfiguration", + "markdownDescription": "The Amazon Managed Service for Prometheus workspace to send metrics to.", + "title": "AmpConfiguration" + } + }, + "required": [ + "AmpConfiguration" + ], + "type": "object" + }, + "AWS::APS::Scraper.EksConfiguration": { + "additionalProperties": false, + "properties": { + "ClusterArn": { + "markdownDescription": "ARN of the Amazon EKS cluster.", + "title": "ClusterArn", + "type": "string" + }, + "SecurityGroupIds": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of the security group IDs for the Amazon EKS cluster VPC configuration.", + "title": "SecurityGroupIds", + "type": "array" + }, + "SubnetIds": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of subnet IDs for the Amazon EKS cluster VPC configuration.", + "title": "SubnetIds", + "type": "array" + } + }, + "required": [ + "ClusterArn", + "SubnetIds" + ], + "type": "object" + }, + "AWS::APS::Scraper.ScrapeConfiguration": { + "additionalProperties": false, + "properties": { + "ConfigurationBlob": { + "markdownDescription": "The base 64 encoded scrape configuration file.", + "title": "ConfigurationBlob", + "type": "string" + } + }, + "required": [ + "ConfigurationBlob" + ], + "type": "object" + }, + "AWS::APS::Scraper.Source": { + "additionalProperties": false, + "properties": { + "EksConfiguration": { + "$ref": "#/definitions/AWS::APS::Scraper.EksConfiguration", + "markdownDescription": "The Amazon EKS cluster from which a scraper collects metrics.", + "title": "EksConfiguration" + } + }, + "required": [ + "EksConfiguration" + ], + "type": "object" + }, "AWS::APS::Workspace": { "additionalProperties": false, "properties": { @@ -24908,7 +25087,7 @@ "type": "string" }, "Version": { - "markdownDescription": "Returns the version to use for the specified X12 transaction set. Supported versions are `4010` , `4030` , and `5010` .", + "markdownDescription": "Returns the version to use for the specified X12 transaction set.", "title": "Version", "type": "string" } @@ -25240,13 +25419,261 @@ "type": "string" }, "Version": { - "markdownDescription": "Returns the version to use for the specified X12 transaction set. 
Supported versions are `4010` , `4030` , and `5010` .", + "markdownDescription": "Returns the version to use for the specified X12 transaction set.", "title": "Version", "type": "string" } }, "type": "object" }, + "AWS::BCMDataExports::Export": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Export": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.Export", + "markdownDescription": "The details that are available for an export.", + "title": "Export" + }, + "Tags": { + "items": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.ResourceTag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Export" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::BCMDataExports::Export" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.DataQuery": { + "additionalProperties": false, + "properties": { + "QueryStatement": { + "markdownDescription": "The query statement.", + "title": "QueryStatement", + "type": "string" + }, + "TableConfigurations": { + "markdownDescription": "The table configuration.", + "title": "TableConfigurations", + "type": "object" + } + }, + "required": [ + "QueryStatement" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.DestinationConfigurations": { + "additionalProperties": false, + "properties": { + "S3Destination": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.S3Destination", + "markdownDescription": "An object that describes the destination of the data exports file.", + "title": "S3Destination" + } + }, + "required": [ + "S3Destination" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.Export": { + "additionalProperties": false, + "properties": { + "DataQuery": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.DataQuery", + "markdownDescription": "The data query for this specific data export.", + "title": "DataQuery" + }, + "Description": { + "markdownDescription": "The description for this specific data export.", + "title": "Description", + "type": "string" + }, + "DestinationConfigurations": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.DestinationConfigurations", + "markdownDescription": "The destination configuration for this specific data export.", + "title": "DestinationConfigurations" + }, + "ExportArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for this export.", + "title": "ExportArn", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of this specific data export.", + "title": "Name", + "type": "string" + }, + "RefreshCadence": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.RefreshCadence", + "markdownDescription": "The cadence for AWS to update the export in your S3 bucket.", + "title": "RefreshCadence" + } + }, + "required": [ + "DataQuery", + "DestinationConfigurations", + "Name", + "RefreshCadence" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.RefreshCadence": { + 
"additionalProperties": false, + "properties": { + "Frequency": { + "markdownDescription": "The frequency that data exports are updated. The export refreshes each time the source data updates, up to three times daily.", + "title": "Frequency", + "type": "string" + } + }, + "required": [ + "Frequency" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.ResourceTag": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key that's associated with the tag.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value that's associated with the tag.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.S3Destination": { + "additionalProperties": false, + "properties": { + "S3Bucket": { + "markdownDescription": "The name of the Amazon S3 bucket used as the destination of a data export file.", + "title": "S3Bucket", + "type": "string" + }, + "S3OutputConfigurations": { + "$ref": "#/definitions/AWS::BCMDataExports::Export.S3OutputConfigurations", + "markdownDescription": "The output configuration for the data export.", + "title": "S3OutputConfigurations" + }, + "S3Prefix": { + "markdownDescription": "The S3 path prefix you want prepended to the name of your data export.", + "title": "S3Prefix", + "type": "string" + }, + "S3Region": { + "markdownDescription": "The S3 bucket Region.", + "title": "S3Region", + "type": "string" + } + }, + "required": [ + "S3Bucket", + "S3OutputConfigurations", + "S3Prefix", + "S3Region" + ], + "type": "object" + }, + "AWS::BCMDataExports::Export.S3OutputConfigurations": { + "additionalProperties": false, + "properties": { + "Compression": { + "markdownDescription": "The compression type for the data export.", + "title": "Compression", + "type": "string" + }, + "Format": { + "markdownDescription": "The file format for the data export.", + "title": "Format", + "type": "string" + }, + "OutputType": { + "markdownDescription": "The output type for the data export.", + "title": "OutputType", + "type": "string" + }, + "Overwrite": { + "markdownDescription": "The rule to follow when generating a version of the data export file. You have the choice to overwrite the previous version or to be delivered in addition to the previous versions. Overwriting exports can save on Amazon S3 storage costs. Creating new export versions allows you to track the changes in cost and usage data over time.", + "title": "Overwrite", + "type": "string" + } + }, + "required": [ + "Compression", + "Format", + "OutputType", + "Overwrite" + ], + "type": "object" + }, "AWS::Backup::BackupPlan": { "additionalProperties": false, "properties": { @@ -26286,7 +26713,7 @@ "items": { "type": "string" }, - "markdownDescription": "These are the types of recovery points.", + "markdownDescription": "These are the types of recovery points.\n\nInclude `SNAPSHOT` to restore only snapshot recovery points; include `CONTINUOUS` to restore continuous recovery points (point in time restore / PITR); use both to restore either a snapshot or a continuous recovery point. The recovery point will be determined by the value for `Algorithm` .", "title": "RecoveryPointTypes", "type": "array" }, @@ -31621,6 +32048,11 @@ "markdownDescription": "The IDs of the AWS accounts that are allowed to query by the custom analysis rule. 
Required when `allowedAnalyses` is `ANY_QUERY` .", "title": "AllowedAnalysisProviders", "type": "array" + }, + "DifferentialPrivacy": { + "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.DifferentialPrivacy", + "markdownDescription": "The differential privacy configuration.", + "title": "DifferentialPrivacy" } }, "required": [ @@ -31697,6 +32129,37 @@ }, "type": "object" }, + "AWS::CleanRooms::ConfiguredTable.DifferentialPrivacy": { + "additionalProperties": false, + "properties": { + "Columns": { + "items": { + "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.DifferentialPrivacyColumn" + }, + "markdownDescription": "", + "title": "Columns", + "type": "array" + } + }, + "required": [ + "Columns" + ], + "type": "object" + }, + "AWS::CleanRooms::ConfiguredTable.DifferentialPrivacyColumn": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the column, such as user_id, that contains the unique identifier of your users, whose privacy you want to protect. If you want to turn on differential privacy for two or more tables in a collaboration, you must configure the same column as the user identifier column in both analysis rules.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, "AWS::CleanRooms::ConfiguredTable.GlueTableReference": { "additionalProperties": false, "properties": { @@ -38126,6 +38589,8 @@ "additionalProperties": false, "properties": { "AccountId": { + "markdownDescription": "If the CloudWatch metric that provides the time series that the anomaly detector uses as input is in another account, specify that account ID here. If you omit this parameter, the current account is used.", + "title": "AccountId", "type": "string" }, "Dimensions": { @@ -39607,7 +40072,7 @@ "type": "string" }, "SourceVersion": { - "markdownDescription": "The source version for the corresponding source identifier. If specified, must be one of:\n\n- For CodeCommit: the commit ID, branch, or Git tag to use.\n- For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format `pr/pull-request-ID` (for example, `pr/25` ). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Amazon S3: the version ID of the object that represents the build input ZIP file to use.\n\nFor more information, see [Source Version Sample with CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) in the *AWS CodeBuild User Guide* .", + "markdownDescription": "The source version for the corresponding source identifier. If specified, must be one of:\n\n- For CodeCommit: the commit ID, branch, or Git tag to use.\n- For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format `pr/pull-request-ID` (for example, `pr/25` ). If a branch name is specified, the branch's HEAD commit ID is used. 
If not specified, the default branch's HEAD commit ID is used.\n- For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Amazon S3: the version ID of the object that represents the build input ZIP file to use.\n\nFor more information, see [Source Version Sample with CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) in the *AWS CodeBuild User Guide* .", "title": "SourceVersion", "type": "string" } @@ -39719,7 +40184,7 @@ "type": "boolean" }, "Location": { - "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", + "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. 
This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in a CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in a GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeStar Connections *Create GitLab connection* page, choose *Connect to GitLab* . (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", "title": "Location", "type": "string" }, @@ -39734,7 +40199,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of repository that contains the source code to be built.
Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub or GitHub Enterprise Cloud repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket.", + "markdownDescription": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `GITLAB` : The source code is in a GitLab repository.\n- `GITLAB_SELF_MANAGED` : The source code is in a self-managed GitLab repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket.", "title": "Type", "type": "string" } @@ -40000,12 +40465,12 @@ "additionalProperties": false, "properties": { "AuthType": { - "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or PERSONAL_ACCESS_TOKEN.", + "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", "title": "AuthType", "type": "string" }, "ServerType": { - "markdownDescription": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.", + "markdownDescription": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET.", "title": "ServerType", "type": "string" }, @@ -42689,6 +43154,11 @@ "title": "ConfigFile", "type": "string" }, + "PublishDeploymentStatus": { + "markdownDescription": "Whether to enable or disable publishing of deployment status to source providers.", + "title": "PublishDeploymentStatus", + "type": "string" + }, "RepositoryLinkId": { "markdownDescription": "The ID of the repository link associated with a specific sync configuration.", "title": "RepositoryLinkId", @@ -42708,6 +43178,11 @@ "markdownDescription": "The type of sync for a specific sync configuration.", "title": "SyncType", "type": "string" + }, + "TriggerResourceUpdateOn": { + "markdownDescription": "When to trigger Git sync to begin the stack update.", + "title": "TriggerResourceUpdateOn", + "type": "string" } }, "required": [ @@ -55213,7 +55688,7 @@ "type": "string" }, "CaptureDdls": { - "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", + "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. 
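Tying the CodeBuild changes above together (the `GITLAB` source type, the HTTPS clone URL form of `Location` , and the `CODECONNECTIONS` auth type), a hypothetical `source` fragment; the repository URL is a placeholder:

```python
import json

# A CodeBuild project source for a GitLab repository. Per the descriptions
# above, Location is the HTTPS clone URL, and setting the auth type to
# CODECONNECTIONS tells CodeBuild to use the CodeStar connection.
gitlab_source = {
    "Type": "GITLAB",
    "Location": "https://gitlab.com/example-group/example-repo.git",
    "Auth": {"Type": "CODECONNECTIONS"},
}
print(json.dumps(gitlab_source, indent=2))
```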
You can later remove these artifacts.\n\nThe default value is `true` .\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", "title": "CaptureDdls", "type": "boolean" }, @@ -55223,7 +55698,7 @@ "type": "string" }, "DdlArtifactsSchema": { - "markdownDescription": "The schema in which the operational DDL database artifacts are created.\n\nExample: `ddlArtifactsSchema=xyzddlschema;`", + "markdownDescription": "The schema in which the operational DDL database artifacts are created.\n\nThe default value is `public` .\n\nExample: `ddlArtifactsSchema=xyzddlschema;`", "title": "DdlArtifactsSchema", "type": "string" }, @@ -55233,37 +55708,37 @@ "type": "number" }, "FailTasksOnLobTruncation": { - "markdownDescription": "When set to `true` , this value causes a task to fail if the actual size of a LOB column is greater than the specified `LobMaxSize` .\n\nIf task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.", + "markdownDescription": "When set to `true` , this value causes a task to fail if the actual size of a LOB column is greater than the specified `LobMaxSize` .\n\nThe default value is `false` .\n\nIf task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.", "title": "FailTasksOnLobTruncation", "type": "boolean" }, "HeartbeatEnable": { - "markdownDescription": "The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps `restart_lsn` moving and prevents storage full scenarios.", + "markdownDescription": "The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps `restart_lsn` moving and prevents storage full scenarios.\n\nThe default value is `false` .", "title": "HeartbeatEnable", "type": "boolean" }, "HeartbeatFrequency": { - "markdownDescription": "Sets the WAL heartbeat frequency (in minutes).", + "markdownDescription": "Sets the WAL heartbeat frequency (in minutes).\n\nThe default value is 5 minutes.", "title": "HeartbeatFrequency", "type": "number" }, "HeartbeatSchema": { - "markdownDescription": "Sets the schema in which the heartbeat artifacts are created.", + "markdownDescription": "Sets the schema in which the heartbeat artifacts are created.\n\nThe default value is `public` .", "title": "HeartbeatSchema", "type": "string" }, "MapBooleanAsBoolean": { - "markdownDescription": "When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as `varchar(5)` . You must set this setting on both the source and target endpoints for it to take effect.", + "markdownDescription": "When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as `varchar(5)` . 
You must set this setting on both the source and target endpoints for it to take effect.\n\nThe default value is `false` .", "title": "MapBooleanAsBoolean", "type": "boolean" }, "MaxFileSize": { - "markdownDescription": "Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.\n\nExample: `maxFileSize=512`", + "markdownDescription": "Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.\n\nThe default value is 32,768 KB (32 MB).\n\nExample: `maxFileSize=512`", "title": "MaxFileSize", "type": "number" }, "PluginName": { - "markdownDescription": "Specifies the plugin to use to create a replication slot.", + "markdownDescription": "Specifies the plugin to use to create a replication slot.\n\nThe default value is `pglogical` .", "title": "PluginName", "type": "string" }, @@ -61020,21 +61495,6 @@ }, "type": "object" }, - "AWS::DataSync::Task.S3": { - "additionalProperties": false, - "properties": { - "BucketAccessRoleArn": { - "type": "string" - }, - "S3BucketArn": { - "type": "string" - }, - "Subdirectory": { - "type": "string" - } - }, - "type": "object" - }, "AWS::DataSync::Task.Skipped": { "additionalProperties": false, "properties": { @@ -63625,6 +64085,11 @@ "title": "AuthType", "type": "string" }, + "BackupRetentionPeriod": { + "markdownDescription": "The number of days for which automatic snapshots are retained.", + "title": "BackupRetentionPeriod", + "type": "number" + }, "ClusterName": { "markdownDescription": "The name of the new elastic cluster. This parameter is stored as a lowercase string.\n\n*Constraints* :\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- The first character must be a letter.\n- Cannot end with a hyphen or contain two consecutive hyphens.\n\n*Example* : `my-cluster`", "title": "ClusterName", @@ -63635,6 +64100,11 @@ "title": "KmsKeyId", "type": "string" }, + "PreferredBackupWindow": { + "markdownDescription": "The daily time range during which automated backups are created if automated backups are enabled, as determined by `backupRetentionPeriod` .", + "title": "PreferredBackupWindow", + "type": "string" + }, "PreferredMaintenanceWindow": { "markdownDescription": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\n*Format* : `ddd:hh24:mi-ddd:hh24:mi`\n\n*Default* : a 30-minute window selected at random from an 8-hour block of time for each AWS Region , occurring on a random day of the week.\n\n*Valid days* : Mon, Tue, Wed, Thu, Fri, Sat, Sun\n\n*Constraints* : Minimum 30-minute window.", "title": "PreferredMaintenanceWindow", @@ -63650,6 +64120,11 @@ "title": "ShardCount", "type": "number" }, + "ShardInstanceCount": { + "markdownDescription": "The number of replica instances applying to all shards in the cluster. 
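For reference, the defaults this hunk documents for the DMS PostgreSQL endpoint settings, collected into one place (a summary of the descriptions above, not an exhaustive settings list):

```python
# Defaults stated in the PostgresSettings descriptions above.
POSTGRES_SETTING_DEFAULTS = {
    "CaptureDdls": True,
    "DdlArtifactsSchema": "public",
    "FailTasksOnLobTruncation": False,
    "HeartbeatEnable": False,
    "HeartbeatFrequency": 5,   # minutes
    "HeartbeatSchema": "public",
    "MapBooleanAsBoolean": False,
    "MaxFileSize": 32768,      # KB (32 MB)
    "PluginName": "pglogical",
}
```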
A `shardInstanceCount` value of 1 means there is one writer instance, and any additional instances are replicas that can be used for reads and to improve availability.", + "title": "ShardInstanceCount", + "type": "number" + }, "SubnetIds": { "items": { "type": "string" @@ -68276,7 +68751,7 @@ "type": "array" }, "SubnetId": { - "markdownDescription": "The ID of the subnet to launch the instance into.\n\nIf you specify a network interface, you must specify any subnets as part of the network interface.", + "markdownDescription": "The ID of the subnet to launch the instance into.\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "title": "SubnetId", "type": "string" }, @@ -68629,7 +69104,7 @@ "type": "number" }, "SubnetId": { - "markdownDescription": "The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance.", + "markdownDescription": "The ID of the subnet associated with the network interface.", "title": "SubnetId", "type": "string" } @@ -69670,7 +70145,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.NetworkInterface" }, - "markdownDescription": "One or more network interfaces. If you specify a network interface, you must specify any security groups and subnets as part of the network interface.", + "markdownDescription": "The network interfaces for the instance.", "title": "NetworkInterfaces", "type": "array" }, @@ -69693,7 +70168,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups. You can specify the IDs of existing security groups and references to resources created by the stack template.", + "markdownDescription": "The IDs of the security groups. You can specify the IDs of existing security groups and references to resources created by the stack template.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead.", "title": "SecurityGroupIds", "type": "array" }, @@ -69701,7 +70176,7 @@ "items": { "type": "string" }, - "markdownDescription": "One or more security group names. For a nondefault VPC, you must use security group IDs instead.", + "markdownDescription": "The names of the security groups. For a nondefault VPC, you must use security group IDs instead.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter.", "title": "SecurityGroups", "type": "array" }, @@ -69774,11 +70249,6 @@ "markdownDescription": "Disables the automatic recovery behavior of your instance or sets it to default.", "title": "AutoRecovery", "type": "string" - }, - "RebootMigration": { - "markdownDescription": "", - "title": "RebootMigration", - "type": "string" } }, "type": "object" @@ -73876,7 +74346,7 @@ "type": "number" }, "SubnetId": { - "markdownDescription": "The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance.", + "markdownDescription": "The ID of the subnet associated with the network interface.", "title": "SubnetId", "type": "string" } @@ -74263,7 +74733,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::SpotFleet.InstanceNetworkInterfaceSpecification" }, - "markdownDescription": "One or more network interfaces. 
If you specify a network interface, you must specify subnet IDs and security group IDs using the network interface.\n\n> `SpotFleetLaunchSpecification` currently does not support Elastic Fabric Adapter (EFA). To specify an EFA, you must use [LaunchTemplateConfig](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html) .", + "markdownDescription": "The network interfaces.", "title": "NetworkInterfaces", "type": "array" }, @@ -74281,7 +74751,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::SpotFleet.GroupIdentifier" }, - "markdownDescription": "The security groups.", + "markdownDescription": "The security groups.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter.", "title": "SecurityGroups", "type": "array" }, @@ -74291,7 +74761,7 @@ "type": "string" }, "SubnetId": { - "markdownDescription": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".", + "markdownDescription": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "title": "SubnetId", "type": "string" }, @@ -74638,6 +75108,11 @@ "title": "EnableDns64", "type": "boolean" }, + "EnableLniAtDeviceIndex": { + "markdownDescription": "Indicates the device position for local network interfaces in this subnet. For example, `1` indicates local network interfaces in this subnet are the secondary network interface (eth1).", + "title": "EnableLniAtDeviceIndex", + "type": "number" + }, "Ipv4IpamPoolId": { "markdownDescription": "An IPv4 IPAM pool ID for the subnet.", "title": "Ipv4IpamPoolId", @@ -80106,7 +80581,7 @@ "type": "number" }, "MinimumHealthyPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. 
The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. 
The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", "title": "MinimumHealthyPercent", "type": "number" } @@ -91336,7 +91811,6 @@ "required": [ "IdMappingTechniques", "InputSourceConfig", - "OutputSourceConfig", "RoleArn", "WorkflowName" ], @@ -91391,11 +91865,15 @@ "markdownDescription": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` .", "title": "SchemaArn", "type": "string" + }, + "Type": { + "markdownDescription": "", + "title": "Type", + "type": "string" } }, "required": [ - "InputSourceARN", - "SchemaArn" + "InputSourceARN" ], "type": "object" }, @@ -91702,7 +92180,7 @@ "title": "ProviderProperties" }, "ResolutionType": { - "markdownDescription": "The type of matching. There are two types of matching: `RULE_MATCHING` and `ML_MATCHING` .", + "markdownDescription": "The type of matching. There are three types of matching: `RULE_MATCHING` , `ML_MATCHING` , and `PROVIDER` .", "title": "ResolutionType", "type": "string" }, @@ -100376,7 +100854,7 @@ "additionalProperties": false, "properties": { "AttachmentArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints to accelerators.", + "markdownDescription": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints.", "title": "AttachmentArn", "type": "string" }, @@ -115061,7 +115539,7 @@ "title": "HealthEventsConfig" }, "IncludeLinkedAccounts": { - "markdownDescription": "", + "markdownDescription": "A boolean option that you can set to `TRUE` to include monitors for linked accounts in a list of monitors, when you've set up cross-account sharing in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.", "title": "IncludeLinkedAccounts", "type": "boolean" }, @@ -115071,7 +115549,7 @@ "title": "InternetMeasurementsLogDelivery" }, "LinkedAccountId": { - "markdownDescription": "", + "markdownDescription": "The account ID for an account that you've set up cross-account sharing for in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. 
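The ECS `minimumHealthyPercent` description above now states the computation explicitly: the deployment floor is `desiredCount` multiplied by `minimumHealthyPercent` /100, rounded up to the nearest integer. A short check of that arithmetic (the function name is illustrative):

```python
import math

def min_running_tasks(desired_count: int, minimum_healthy_percent: int) -> int:
    """Lower bound on RUNNING tasks during a rolling (ECS) deployment."""
    return math.ceil(desired_count * minimum_healthy_percent / 100)

# The worked example above: desiredCount=4 at 50% lets the scheduler stop
# two tasks, since the floor is ceil(4 * 50 / 100) = 2.
print(min_running_tasks(4, 50))  # 2
print(min_running_tasks(3, 50))  # 2 (rounded up, not down)
```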
For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.", "title": "LinkedAccountId", "type": "string" }, @@ -124907,12 +125385,12 @@ "type": "array" }, "Description": { - "markdownDescription": "The description of the composite model.", + "markdownDescription": "The description of the composite model.\n\n> If the composite model is a `component-model-based` composite model, the description is inherited from the `COMPONENT_MODEL` asset model and cannot be changed.", "title": "Description", "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `Path` must be specified.", "title": "ExternalId", "type": "string" }, @@ -124927,7 +125405,7 @@ "type": "string" }, "ParentAssetModelCompositeModelExternalId": { - "markdownDescription": "The external ID of the parent asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> If `ParentCompositeModelExternalId` is specified, this value overrides the value of `ExternalId` , if both are included.", + "markdownDescription": "The external ID of the parent composite model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "title": "ParentAssetModelCompositeModelExternalId", "type": "string" }, @@ -124935,7 +125413,7 @@ "items": { "type": "string" }, - "markdownDescription": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.", + "markdownDescription": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.\n\n> One of `ExternalId` or `Path` must be specified.", "title": "Path", "type": "array" }, @@ -124960,7 +125438,7 @@ "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. 
For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "ExternalId", "type": "string" }, @@ -124970,7 +125448,7 @@ "type": "string" }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", + "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "LogicalId", "type": "string" }, @@ -125000,7 +125478,7 @@ "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "ExternalId", "type": "string" }, @@ -125010,7 +125488,7 @@ "type": "string" }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model property.", + "markdownDescription": "The `LogicalID` of the asset model property.\n\n> One of `ExternalId` or `LogicalId` must be specified.", "title": "LogicalId", "type": "string" }, @@ -130195,7 +130673,7 @@ "type": "number" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager . The credentials should be a user/password pair. For more information, see [Using a Database Data Source](https://docs.aws.amazon.com/kendra/latest/dg/data-source-database.html) . For more information about AWS Secrets Manager , see [What Is AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) in the *AWS Secrets Manager* user guide.", + "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that stores the credentials. The credentials should be a user-password pair. For more information, see [Using a Database Data Source](https://docs.aws.amazon.com/kendra/latest/dg/data-source-database.html) . 
For more information about AWS Secrets Manager , see [What Is AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) in the *AWS Secrets Manager* user guide.", "title": "SecretArn", "type": "string" }, @@ -130236,7 +130714,7 @@ "title": "PreExtractionHookConfiguration" }, "RoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of a role with permission to run `PreExtractionHookConfiguration` and `PostExtractionHookConfiguration` for altering document metadata and content during the document ingestion process. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", + "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role with permission to run `PreExtractionHookConfiguration` and `PostExtractionHookConfiguration` for altering document metadata and content during the document ingestion process. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", "title": "RoleArn", "type": "string" } @@ -130551,7 +131029,7 @@ "title": "InvocationCondition" }, "LambdaArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of a role with permission to run a Lambda function during ingestion. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", + "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role with permission to run a Lambda function during ingestion. For more information, see [IAM roles for Amazon Kendra](https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html) .", "title": "LambdaArn", "type": "string" }, @@ -130650,7 +131128,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of users whose documents should be indexed. Specify the user names in email format, for example, `username@tenantdomain` . If you need to index the documents of more than 100 users, use the `OneDriveUserS3Path` field to specify the location of a file containing a list of users.", + "markdownDescription": "A list of users whose documents should be indexed. Specify the user names in email format, for example, `username@tenantdomain` . If you need to index the documents of more than 10 users, use the `OneDriveUserS3Path` field to specify the location of a file containing a list of users.", "title": "OneDriveUserList", "type": "array" }, @@ -130666,7 +131144,7 @@ "additionalProperties": false, "properties": { "Credentials": { - "markdownDescription": "Your secret ARN, which you can create in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nThe credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently support basic authentication to connect to a web proxy server. The secret stores your credentials.", + "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret. You create a secret to store your credentials in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nThe credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently supports basic authentication to connect to a web proxy server. 
The secret stores your credentials.", "title": "Credentials", "type": "string" }, @@ -131250,7 +131728,7 @@ "additionalProperties": false, "properties": { "Credentials": { - "markdownDescription": "Your secret ARN, which you can create in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nYou use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.", + "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Secrets Manager secret. You create a secret to store your credentials in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nYou use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.", "title": "Credentials", "type": "string" }, @@ -149593,7 +150071,7 @@ "type": "string" }, "NetworkId": { - "markdownDescription": "The unique identifier of the network for the node.\n\nEthereum public networks have the following `NetworkId` s:\n\n- `n-ethereum-mainnet`\n- `n-ethereum-goerli`", + "markdownDescription": "The unique identifier of the network for the node.\n\nEthereum public networks have the following `NetworkId` s:\n\n- `n-ethereum-mainnet`", "title": "NetworkId", "type": "string" }, @@ -165912,7 +166390,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of strings that define which types of data that the source account shares with the monitoring account. Valid values are `AWS::CloudWatch::Metric | AWS::Logs::LogGroup | AWS::XRay::Trace | AWS::ApplicationInsights::Application` .", + "markdownDescription": "An array of strings that define which types of data that the source account shares with the monitoring account. Valid values are `AWS::CloudWatch::Metric | AWS::Logs::LogGroup | AWS::XRay::Trace | AWS::ApplicationInsights::Application | AWS::InternetMonitor::Monitor` .", "title": "ResourceTypes", "type": "array" }, @@ -176882,7 +177360,7 @@ }, "SelfManagedKafkaParameters": { "$ref": "#/definitions/AWS::Pipes::Pipe.PipeSourceSelfManagedKafkaParameters", - "markdownDescription": "The parameters for using a stream as a source.", + "markdownDescription": "The parameters for using a stream as a source.\n\nA *self managed* cluster refers to any Apache Kafka cluster not hosted by AWS . This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as [Confluent Cloud](https://docs.aws.amazon.com/https://www.confluent.io/) , [CloudKarafka](https://docs.aws.amazon.com/https://www.cloudkarafka.com/) , or [Redpanda](https://docs.aws.amazon.com/https://redpanda.com/) . For more information, see [Apache Kafka streams as a source](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kafka.html) in the *Amazon EventBridge User Guide* .", "title": "SelfManagedKafkaParameters" }, "SqsQueueParameters": { @@ -218759,12 +219237,12 @@ }, "ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ScalingConfiguration", - "markdownDescription": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. 
For Aurora Serverless v2, Use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The scaling configuration of an Aurora Serverless v1 DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora Serverless v1 DB clusters only", "title": "ScalingConfiguration" }, "ServerlessV2ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ServerlessV2ScalingConfiguration", - "markdownDescription": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, Use the `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The scaling configuration of an Aurora Serverless v2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use the `ScalingConfiguration` property.\n\nValid for: Aurora Serverless v2 DB clusters only", "title": "ServerlessV2ScalingConfiguration" }, "SnapshotIdentifier": { @@ -220624,6 +221102,16 @@ "title": "AdditionalEncryptionContext", "type": "object" }, + "DataFilter": { + "markdownDescription": "Data filters for the integration. These filters determine which tables from the source database are sent to the target Amazon Redshift data warehouse.", + "title": "DataFilter", + "type": "string" + }, + "Description": { + "markdownDescription": "A description of the integration.", + "title": "Description", + "type": "string" + }, "IntegrationName": { "markdownDescription": "The name of the integration.", "title": "IntegrationName", "type": "string" }, @@ -236868,7 +237356,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the contacts to add to the rotation.\n\nThe order in which you list the contacts is their shift order in the rotation schedule.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the contacts to add to the rotation.\n\n> Only the `PERSONAL` contact type is supported. The contact types `ESCALATION` and `ONCALL_SCHEDULE` are not supported for this operation. \n\nThe order in which you list the contacts is their shift order in the rotation schedule.", "title": "ContactIds", "type": "array" }, @@ -236896,7 +237384,7 @@ "type": "array" }, "TimeZoneId": { - "markdownDescription": "The time zone to base the rotation\u2019s activity on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"UTC\", or \"Asia/Seoul\". For more information, see the [Time Zone Database](https://docs.aws.amazon.com/https://www.iana.org/time-zones) on the IANA website.", + "markdownDescription": "The time zone to base the rotation\u2019s activity on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"UTC\", or \"Asia/Seoul\". 
For more information, see the [Time Zone Database](https://www.iana.org/time-zones) on the IANA website.\n\n> Designators for time zones that don\u2019t support Daylight Saving Time rules, such as Pacific Standard Time (PST), are not supported.", "title": "TimeZoneId", "type": "string" } @@ -248814,7 +249302,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "CreatedAt", "type": "array" }, @@ -248838,7 +249326,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", "title": "FirstObservedAt", "type": "array" }, @@ -248862,7 +249350,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "LastObservedAt", "type": "array" }, @@ -248878,7 +249366,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "The timestamp of when the note was updated. Uses the date-time format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "The timestamp of when the note was updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "NoteUpdatedAt", "type": "array" }, @@ -249014,7 +249502,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nUses the `date-time` format specified in [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3339#section-5.6) . The value cannot contain spaces. For example, `2020-03-22T13:22:13.933Z` .\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "UpdatedAt", "type": "array" }, @@ -249054,12 +249542,12 @@ "title": "DateRange" }, "End": { - "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nA correctly formatted example is `2020-05-21T20:16:34.724Z` . The value cannot contain spaces, and date and time should be separated by `T` . For more information, see [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) .", + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", "title": "End", "type": "string" }, "Start": { - "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nA correctly formatted example is `2020-05-21T20:16:34.724Z` . The value cannot contain spaces, and date and time should be separated by `T` . For more information, see [RFC 3339 section 5.6, Internet Date/Time Format](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc3339#section-5.6) .", + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", "title": "Start", "type": "string" } @@ -249406,6 +249894,142 @@ ], "type": "object" }, + "AWS::SecurityLake::DataLake": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "EncryptionConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.EncryptionConfiguration" + }, + "LifecycleConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.LifecycleConfiguration" + }, + "MetaStoreManagerRoleArn": { + "type": "string" + }, + "ReplicationConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.ReplicationConfiguration" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityLake::DataLake" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::SecurityLake::DataLake.EncryptionConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Expiration": { + "additionalProperties": false, + "properties": { + "Days": { + "type": "number" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.LifecycleConfiguration": { + "additionalProperties": false, + "properties": { + "Expiration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Expiration" + }, + "Transitions": { + "items": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Transitions" + }, + "type": "array" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.ReplicationConfiguration": { + "additionalProperties": false, + "properties": { + "Regions": { + "items": { + "type": "string" + }, + "type": "array" + }, + "RoleArn": { + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Transitions": { + "additionalProperties": false, + "properties": { + "Days": { + "type": "number" + }, + "StorageClass": { + "type": "string" + } + }, + "type": "object" + }, "AWS::ServiceCatalog::AcceptedPortfolioShare": { "additionalProperties": false, "properties": { @@ -265811,6 +266435,9 @@ { "$ref": "#/definitions/AWS::APS::RuleGroupsNamespace" }, + { + "$ref": "#/definitions/AWS::APS::Scraper" + }, { "$ref": "#/definitions/AWS::APS::Workspace" }, @@ -266150,6 +266777,9 @@ { "$ref": "#/definitions/AWS::B2BI::Transformer" }, + { + "$ref": "#/definitions/AWS::BCMDataExports::Export" + }, { "$ref": "#/definitions/AWS::Backup::BackupPlan" }, @@ -269099,6 +269729,9 @@ { "$ref": "#/definitions/AWS::SecurityHub::Standard" }, + { + "$ref": 
"#/definitions/AWS::SecurityLake::DataLake" + }, { "$ref": "#/definitions/AWS::ServiceCatalog::AcceptedPortfolioShare" },